var/home/core/zuul-output/
var/home/core/zuul-output/logs/
var/home/core/zuul-output/logs/kubelet.log.gz
_q?LUܭ풡Ymͬ,;7 m#]7 à82ǍpT(\$ ލٿ϶LNQ8*'4j\fwK:qGo(R/_3UkrTlT"OZuV{?Bv/?xs~w/(苷{{o,\y4 ?E$Gpo迿CC3Z9zW608Th~qn~fO}w;(/ݩZq?,濂d6Ӟ(n*J$;u(EF qh Wʣ;z&V͜Q|e8URQNisG9}}5h*qQG z^X?ӫGߐKVWOpnb\9hBPPIFP sMHIC(uR֑|>eJd`πEk *FQAK<8-DAޚ(ԨTr*D]Br uRg^qUԋw]tv ݀;qF@/e~QsBYj)X Ή(\Զp!(}(tY7흹kI+v@G+;zfU@xs3x!RGdbxSXL` zu?(f@vSEeiȕ6DB-Er"wIt`s F83RTjm'  d;"98<8fME]1)NoשD`e8p3#)Du)(h奜o4㿮p kQ*xp l!)AA4"tG , S.=R,2Bg h 'h!p 41k0)pDR,C5rv#)06 a^jO14,aB8WS =5~jL3'\~c$NXw$9%Dpk$4DaI˖5/w3QhٔJCC&;n:aT ]r6Ny&FiiM)2ʩdM~7ޢV`؂*2F(rN *+K1z٩2J~xԜ\鍃VJoDKZsyνиWÒrZrn(a O(*{FdeW,@s?a#Ou42QLkrX.DKrRۥ"gx%O()\>.g/bv0še;S#Cnt&::jf*2U-)7*v+8KYZ?갔06ZC^q~Wy 9cMƃy7/0w^HENzD8﹗#83[a1~Ayq4l#zZSK9CI\=`WVgWRW/:SE4/H\!qɥ/F\ejɋL%N\}J3PwiL{u풡42/KT/ ="^ CBO_7ͪWs/QPZP#5נ45Q*_>xw2>uiol'Ӽa^e7wA9%Ӱ[gPy!/\!I.qRpcaYE6.܏/3ePIvsW߿~] &/Չߟ\<ׯ6_*|kgW&Dg $d^ cehG1:iR# .!,q +4IlA7{SFtz;B,_.N =P\T_ ˽h n+uP:9&l5mwƾH/l ǿZ$6q%CԚJ,Tr DN  Rr=E11]6ᰆqDY:%okp/Ɠ` mw#II_`< O  s Md1@HV+++P*pAN"( 9$\rCDQP]V ]pAףHL9qZ~~ͽ[l5@>-&<5pIMD!U/ u0DEE A"YJb'x<*tQts<ݩq"x ΢BKJ\)1Xy!чhiiEdn r$r'6 g/oPyOL @]"N*!'IpZRҐ #w]uOhӗuZ4}G!]D=na^^jC \7>(9O3zvbbRCZ9DJo,-{߫r^tn!y\f5az;PߐOjS kۧOP,*),3B;6(KǮ"A\}:5A i&J.v# J>k2zbX$q*'µD)h&8e.x뜡G EBmr0kʁlYo=+|Jjv<7)jݷ.<=:qW]*7bߎ>];Oge%yFV*L]1YomgH)WuQnbzռw%smݶzk܆sƼ祖a2motEv{\-wgfkzz~ T @mo V{iLmLa] nEBTB1d@>B RI4s{vIĈ51@Qz6N?iZˎQi0$)B{Cf"=1^] M)KҘ#*o(~H?Š  Q<=8RoٛKuEdNF8*4!$?X ]hgӋ3kٗK.3Sa_C#O vtř_dw3Pvދ=[ )ó +: 9Ղj@h0 K7_C.9$7yWE4{طqȔGa4Jwt6zzE2%ٗoŋ'G_zvtWfeF/<;?b/ι(|:Ã*>?VE>{=K-ĪfIG:Z5\9 {+,oX3{38l11Nnx8\97xU|VNF-dl}z TkB67(V^ B@'_X-?8bXd!l, d!Ոb1 I蘝`r-5Jl˵a*fo"qf*]lF{ͷ()[-m~V$ }rQiq }vjtHjyBaEG?}6 SYiBgv tVj)[<[n]8nQ8nET^ǢB*ZJ=kXB࣎Ne E2 dkᲶPR)(j|`.VkHRs.#- UZmkF5twIC?xX p,옧Ð"8f|  Λ{[c[oo5;J6u5A;Bڙ.NwDidL D܀Bݐ ,}urJ$z]>Ro-p^ xGIAFfHEu&R袲^5{8k/$SxF_}y` :=̽4:xtUmB~pg4k^oU~n1~7>8^Xi/9""V=+>r}ի}mҥ_1iwV5YAf=]:ߦ).Y.-i#A=֗BgI9rr|^gW7nbl1]$z ep>`J#+[hYsZT" 9YIDZ{Y<10NQW%;*[c)3ƛ&2hIiM…Po،5P'1Ygd}sOao]li_7CwS욵zc_4dH=֮6eޓ*dd:  s&DjY dZWML.oXR:8&|a:bqIB4* $Kp3rnkqZJXP񦪈2|$Xx B94/;[\ve=CYH]iBb^TL28FmC9o+Fw(52e];&F[d 1Z5E0^('5])VԄmP[KO֠| ZV%Q^%oA?s18hm3@/kU,'255BS $ґ29 GrD.@hEJ{>a)Mv؆f|iP%mB7[il۶<`ǖi5dX`ƜdZ]H'%}pt%edETjF7͹(j,u.UBGwv耵˔UPXI֌ȹ]3*2[qƆpޭ.0{Յz`Vdm{g45~gtkl/hR&XA[B90D ɗ*aYYhS|| i^R(VJV,&U1Mړ$b]|Vܮ8_p1Ek7Mjՠ{cQT 1*${jZ44ÓTb(NlU]3ZԶH1CΰE4{IeƘd8R-*Bdة& s>ZBqr*mшcSkD=hA#}OtQ˾-R>Q  6߹&DV{dh6J;¢fߘX2jY9&ɒTRbOZjTA:X#6#vqUG֋EmZl%EX/A/z&'Ujdk)(U  HXdC62H +Dj>g7{ыۢqǦ Aҹ~z6=:&Ts蛂G~D>.G?*Uh.YZ始QcL+bDifq>C/[O(5 {r})Xb?f)d)X/H.P=RFe@-4T 9%1"K>@)`%ZGJ|J=F0z19k:ݢC!drPhe9 EД7g rw5_@,ʤШ K˅=f(fUZ LryQާ'=l `ɐD:I_(g4(rUdP+Y`pP*& (deʦ$qZ1yD𪱜5#gM9۲C٢ ʧW %$wʘdW+j$8l('i"\QVG%EkUuJ6N v2T*-a ïj' ~ҕ_ۚFf}l.$MJ'@Bt5A f&D,3$cуh8_3x" VCx3)8kI#К~h"0@`s-3f >6KNjtn"´V4ޚf,·|Rpx"?fLF"]ٻPx ">ޠ3 c ƶ*l*t"W%]IyMt{ QP CcXrlR¢['sw]#NP귣g?S:C%v^/K%/G^Sq~ٝ6NLTKZ06!�Z/w/2+SVGEJE{9-%rG/xb>з%&xBu:r@kg6m6i;0aQu\۝?׋яC_b48;oNN'LF~~4x: Fֈj1wF̍3KRO?^(࿞gWyl\L0LO&gf)t'(G/dWt:-*,h*йhEp j}jpRyWmrI g G y[ Je2YmrL7RPN.iEMgzy mtr~py,_8}s;&è*}on-2;essbk {kyp=ک8k901R97&Mk4ROQ "*E:eC + R > m"sDzw߫͟[v[A4nm{ ^l {o4(m|<inzw|uai:5b$i%r.9'! ~]թ7_gP_btܛ9;hQӷvE.vI0- 뿊ͼ&5 “EEB-S޵q,B.vW<I8"8l>E_m%4ɐ-~LIeu9꩙KWY (cP!VI8'bGCYגmiSWkdvqݧQI>K%]WvO1U/GM/9ݛˡ_ͯ?&Oe-YM5Gg}r1$qa~/WA۝ƕ ]Ԇy9Q{6zHJz$J% m>G+jdI)}5u1wnu94m 귞.\e5e^vt ]xHfۘ#1-b LTgZ,:iޭ|5yr!1REއNra3J4VZiӖc4gTѴr'JI@I efysf&xX`&xBhSnNqH2#\!rcJL3TFڥ(xhe5q8(ggEg<8xHd_(O9gșC#2;@* %hLtڹ}{ iܥą`nMGwK!F[("XevujL6ir>u23l_fiqCμѢ T!mV.HYޣ)D6+>:P깻+5q65 Or$)2T؊]֑VUhF2 MIx$*j%6;#P{ ]{Ƈrb[܁J Yы_h#t6_j 7]wc'݆r7W3n:P61JLߏGxڞpy1.KC-$e6j0/G? 
k~Fi4[gdgnLso ySW)uз_Msa-\r---ټ_-MSZܳdZܳ=[ܳ=g{> ([dZ?EZaϽ@R\#ZɡvTj%aVrj%x PKl+9xT+9J5JCPcZ`Z~ϴ;sEm tt`ҝɐ $W XʀD0O!VЊ)*z$Yi],k!YqU&\$Z[D YxG&s&̸Bmwvkv>x9wۇ+t zo!{ loq|1*d,cb.-qtr☍&+L)i+L=7  >wP2Sk@{ J樕S'ɬĎ=dG!~S8Z?~wozVp`x?|uc!1eP6A%Xq"$S1;ʚiB5!I2nЀxR#u<FNJd" ]pK?͜e*0, !`g;BmqL>.ɇwUu<}[>ZtaVLѷO-s遁G`#C$ 2nv"ז#! 24ѧ$K ŌZyUP!$hU(=1l:/|XEFL03 V,ؕ,׫lU݃uVaǖm,mK_Kޖ2q1OJbϲFOPӏT*{3LYH1vЍ17FGn]&W߃ru{C0m_7S<`_ r!!kk@)&58zh\~'Qk&U$CGi.gr .*#! 3a4.F2AʤL}!&Kz_a?BP2e;'-t*zӡ7ocLکJ$< pv%Rǒ~ɀS4<$2΍}pj"o IYM,0Hdt#)E18-W*qA&q9 w;+ހ!^M/rc\'/}O*D_0}u]‹ x6}{pˠb[EY,6g.|e?* :rhˤ"]7sn/I r1Iwǐסrub`Xۼn7zo7ܽdkEM=W8:2'qglNt%_;ŸG Ju9ɲV,'D#&/y;4* &gb`sCmH:f1Y򯘜<$II@H-'aٯ?͉M9$T['&3Ix66IUsy^)L$u},0 {iO,1hh Mc\)PC֋RĜ/1qR8WVƑXʻ7,<+Œ2&_^|Yb//x<4X\~mLbD?eALB1lD m6e%rRU2p C KZfU>ʈXM]u9:cqTEpL2B'eqK sw:1+x``w6Flx\<<:Cp*w?07+ e?>RRdW~$]EacN=sf[SUAId(!e( lRj9(RiKe32L|x 8m`2Y[FƧV^9䡶38$:Q!c׽8xOE&bEЋўrة7~^< Y}[8$T:S`~})^ tqIEe[y 1C"c$Z1e"z2E8 e+k[Msϰ!p% nz <03RjɜGA&as2ƳNz2 Psi&9Vʊ )h洐GU߁-nLD֨fL K9++Y5qԳ^l] sB-@Q7 p":\#޵5$qܢ";Όwbl>8!1OV_t!5 VWVUV旗ʲ8r!(j2g#Ă. IK%p%ӏՈQHi[DƴL+$K`Vh!~yB֠vN,F&#F{G9 @Q`&GyLk&Vp"86FrkfmPZP+hQ;>xe/"wwhyߣ/j~o]оiPY$-)a#2b@{G8`X1 8MК;IG"\JlB O@(5(K)µLaQ/5^#vyk,Ylj_@p0-so4|0r8pSFN%QQi#S@`98F",b$ ÅGՂ@f<NWD*#R"%1dNF03,*͸J1 +"F ` `(`ܣ/7|2OPtD8f! ֽpEn!k%R2P; n˾rCh By `Z8JV"auFh&,sRUlR iemp|-GlB6G_3,: g󥫅H,yD8|-l 1 HE|`ָPZ 3˽ev\;)y﯐'<ؗlLQ:ύuc 9꜡Αb:f4 wAvBOս%P0 8+!`SR.*\O.9:!_€taWt(lj|ŗ20>d힄%[BOvg/NCvǽQ-4Yjd򄺢k9Tex8R0%StV>܌p^yWx}>>.̂kr^[c;(Vzy' }PSK7-65C~Yf чQ0i&{@mvO:'ll[;jc_! ^#aХW@~,ywqgrG+_oz\/og{v O|<|{ۏÃ?{ Zo0@1tIkZW;4jګ-ӴlՀ/ѮDmvkrzavD /ٯ^ȥRYuTkUM+| iyDjwU_ #ϝB(&Sab7Hh;i/b(_MwƱ|* \rmP&q8lϾf5g6Z!YZRrσ[dhEA@(Kt 0uV9r&`w+Ϋ P_NBxpZ=:-ur'po,lݴ_{هRBf;"nnVJd|w>,`ϒ,g7MN*d|8!n*_kOZLO;ar)= ׽ϏTGLRk0X\D&)$R*Q#Iٚ4%d zyPTE+QHMDc:o-KIXKotsEG-G#z!Uxd!tf0`dDDL `<)c"-GZ#g Ck\CclؽժLr>7:?ee޾]!U:pGʐ)s \@:q vh5Z`(3*h2:6*GT KIwXѡOhW8X`*=L=@tMQn+Gra(bs{bOۂ]"glP9  O ֮nyM"L Y όKjQ-#%Dj=`<j .X =!"=AZ3E4^CG҂ʢaظ[#v2q8A׃MK!:f"_}q݈|g7HM:(@XrwE7R>pMJ hy]zA<UoH{ùdz 5bv{]o#7m7˪r˗3'I;!2$ٜ!Crϥ CpbkO M 4b"[r mCwwQF|Y^p7Cg^,[0Qe-b08 K%^g^e|tQWMS& 9W߿ߡVGSJ0Z'Zl*x 6]a:,Ֆe[aU6<̾uWbP"X'U⩤3)M>t6'OH\iOF\%rz**yfl*lz /9ԃn^\ݍ`Y\ݍ\n 8nT]6B\;+WN=)1y2*KSWZ6]\%*݊(Ulfi-/|@b*ֽDDhrHNXt?e?ʆqLsLQGEJR}"2&XrAOFL'r1{*b:QK䦋D%[1 4Rr IlXUQpnTqݦzumiWG,ZPT ̱x<4&,.@sk9$d:,ØNGE5׮+.MB_r6W+\)ʵLvDrRt4H"Nqm,#3P $\Uo5JbOeߥ'PJަtچBӹw@i-7nv>s cjHr E_ƲW0zLg(jv, Kyp IƓVB #);Q`/OE 1VOSؙ2FC3:N'Nclc>1hUY:~ش^,^^ʺK-Ys4h+a\ꅻ !/ (d'9ZSLaq/}}TI-?5X)ZPFvFgm$9 Q$oT'[|<_#ƞfaH:#2iad cc4<2ZE%K8ht.<8`hՁ.jNV 8!|.@0h<%y$})G/V]˰R*^T׫, ^;W%Y%8 )*52&+iRbg+ )[Z5!&ji.`wEt F: !IؔMWЊtevr-ѶNyt4ȤB'70v<ێR`w-\5ZYC[ht3]v2H^krfa(}%Y,`pVz[1x%.b.P+hpLZN^{y(aoGfM6LŵL ey;ŚPmy\#fi ͙M) h3y[oj ʻ넸Hl G7V1S F#'HtfE{ctX+ec6uT'w0&kw$WPvGDDH`g)Ac`!Y N +Q*I~5Dxi%Q0 p"X!1JhT% z4[o#! 
FXǩMRBQbo΀.H'y-yyUsUdZ-;pa0mfIFxj'xoy$,x<\iB K,oJC]% d%+e)#x0g<_A۝mnA>,:D}EK:%<^'>DC%Oɓug/EqƒA[¨y%R.~E'Qw$q ;!u״١G !CǐzQTڳ@TBgn=_nۓQ?^u8hd?*G`.+3-8ϖ"L.<̚tjOq^Igz9*~yMT{ *ܱQ?Ϸ_!&EO@p3xo:d999'M0eo ]AwcuO{^橨~D}GN#]Z^ԍ&_Tr CzJ\IazAg*Gڗ^S7멹_m#gq+~MD]yP83fFN1]1-J|I!-pm6۝fqIdݝ7gxu=&5[֑̻&MVogh ٛٱ,aj _4'&__2%OYW9j+oq2Nng5yܣ !rr'Ti_k6J;ةvXٕ a!G|a4#ņgc nJ*m,NoM]u7G l84tx?LݹT_۽3p ?64SϾVw׿j M#߮Kf;x3d17Dn0agB2պC=8ԝ^?erlk}WvLu3ޫ\8 !kKnc8 G`y'`~nv M]jG'͹#pLƼ-, u "YgD]\h)sߊf$J0SP5WO.F(tL ?D%I՚ Â`1Y />Qʡ( /CO<^@Ө>RA$)ht=DșQ(Qz}fLW,[6vbdp,w<êW:r“U'۪qs)E(U~ި/pxu3ͯ >P!@rNU)T!Dx R2Bpr2toKEhBjm0wŬ!ګH.HTM{4IJJ+T^Bt.Q)tWr>+ }Vό>+eߙzMkA*HT`SUD>'`"\@Ap7ˍHlQ%es䔎Gv( p*Tbw =!ol̡~&ļ̈N}:e]yV\LqLΗZ_А|<=PIs@ b6C30$ ɖ{9`aB<980d@=i5q#3)R(Hp-)7hFMQ>$g1PES$H[ , fp=KR 49b 䳚Y_uPsW!,z , h"d3rs ZKrj[ϘC]G(o9TM4 ڨEݤ(:N?po -$_aGSg4I=[e/z}*WD|0uNC}6W1c:/FIF -$$=ʱ䑝wr)[ ެbv֟&Cz&?lK{SNjC"'t c%HF]ULp5d_5,Ku*Y?ՙ^Z\MoxW{ rfKqׯ,>]x]/W'[D/CXԙ|Ye>&v&Mv[^sUJYo+F\kmt]V֙kǢg@)!|%Y1?f̞m}{xg?MʢN]6aSi|c (Ji+%rTJ[ QP{ GvO]Bw*ԧ;^o"LD9A] :3$ՒH!8muP Ge.0^=Ts6F \GpV[8QȤ/ {X 6t~2{[o/dqqZv TXz/$\0יh\ԹH +iShc!4Ip-Z/*?Y皍jc.VɛU Td'/@3h*HJ9"Z|vsx@XnL'"uPHҜ5e I>$1O^F'uJcgM?T =qB22(w:QZk@rQ9PbF( sRT|/wL?mqL4Lg#zsd\I]:*$.AXg__-ϷeDg~(rΣTmmA]q*$Ug>Nv=i87~u\¿\__mAjoyM/.nmEHjɂ|~åow Bf2w&!t7 =i姘l:ذn^9^x8h{geCZ%]c۱㑺MϏU]>y#YB/2vWߪۀIp`lW[$*"TϐԕM$Z'353uq-F㡯N;&e8pxޒ:~w_>g~w^nvQq -Ao,¿6@ ty\Zֺ4]o~ia*ƚw={+\Y[^r+O5Oh"$oO>>;/~ۏ$EԤx(2ge1NqiL bB a@ K5]4}k>sEVT7yη`7o]~6焟S$)"e*Z7}x|Ork'uҦ%z!SZ JU[2&T2*g,-Ց޶>u]0iR H򄂑S~ y- yR,3Gʨ0{ &TM*`~M2+0`+? ~p~,I37#Kv^GSnBM]Ƨ!x4zN֭ƇKU>9n2[@/K 5r)2wITqׄqw_QZ|ʴ}U wV9b6Bݳ}Pw xDLYqniZؘ{fE*hgoTVvkQ:>8M[DѦz2H2Ii r):okߦ3_;KBo'~~z._R؃Uli WSNÜH{oi npuDEDmy-NE-sଗވ8 {0"(l\X7wJB Ke|,6KYf- a1&d m稑\{lc5qnCw<IJrpM02Ş;}Pc6 kC:r|~V%Vomyb 4_haWZdRM|T@*'i.̝ v^yeD1B7Y]if])}qwRZ1j& Uoi^dbݤ%*kR*D'QD)$2&A,VZ"ǥ8K$Ӄv{{yFl>~x5?PL45=o/e~x7;8\RPdsReBA zN\paZt77퐮qQ¶Dr;w4R\\8 k̳vvyߠ}A=Z@0w0QrIx_)'gWOs'}c:O>m&0[՘O sfZq_l$yfRS) ]i&(Jrq8 =y2 og؈>Q}P`w—SBg_=wmtx,A;  7̼}=>OfG~]5?:6>O& Q&{Cn^hzfz[z̳C>F}ysX!%;ԝO x`(k)_tcohzZFeX6:d,Z貊9+ORZ,Fg18PF*opɽZKa(U)2-9J]J3p<9Ld:0\fH5qV{x: 5?=mJ4Mτ|\4W=eP!v3rE}rڛth(4.sT:`Рˤ43XE &_ՂdKu$3fg"*hA`!jWf g=;yYWk_mu3)!vO aJX^d?ӹ8$4ny:#BBDI\ bѹ%PN(ٶI(ג~B)hF*($=aM:s#(FhUeFxI1X+1;k;Sr)ia)E/A$I3b#LXQ8.3G+j⬘P^벷r~w]l.?>~=rfY:͜`Viᝈ){c{g\($Z^X@d*}~<+(SGIz2,ƌ@~$zDfBvZ0ͤ"X&ez|ڪVd-B%UgX-PΗ/hO>NKȵ^x}˳bҩRtp>N3tWwӽe^y_d,g ^P0ΝIp 82&﹧YzFM'1{J6&f\B(zNw49K)j(29ڞ87{zXsN-Xr^rhV}Bro'/=n˛~ٮBc[.$U |ZI)#TJ$wb`Qh Hm.#,(s3A.@6Ơ2"dI$!QVv{W-qn~|Ϥ⵫iǺ^[Tڢy`h x< z!L`R'I$da1jQ*=FQdt^2IYt bbԘ 8B7ϜYE(Nj Wf'ySqǺQVyxC2 fy?qV2"%2vYb1"AU2c%Ɖ l"}02E2!P&% CsG6ѫ̸iue_l~:O"֠"c` $}NQ%@Ggc4/ފ_XM;XYy .lot頏+m /o.kՏTEuk/^+?O>+AL& Ӧr;/40N:OVV;i)8sY,cQ:=!G 1JNx$GhA^:R֦CSf"gJ-؈`Z3rAIڜzĹ:O +?,i3/5we$:F9Lb^Y6(Ǽ "% &eXlЕ;Cosv7=В2_9?b_o[~sOaBM2a\wAZwIxg:tKenkVݷ!v``+"H![uUd4HB+9)-0 PgM S6;n#Rj\HWL:BEg=REю2R(ZYai\z ̞ sl-5ݼ;􍤠꼽~u<9aW* +$Dm,a{rZ"홉[JYԦri9#/W%z?ѳ/vF~ }J=}˔)28'əNmQf)r KwХwUf>̛c/hn-L`fZ@U{T~/X!G:s)ɎR;e7 E(jmŪQ6JFi(mmmҶQ6JFi7, ?3ۉl`|&a|@ogX)& HBѿ{/r2ާ<;>v%w(u%]6L~9$"#~2کSd=:Un5>^_VT9-/\}ͥnzO^%]8x9H?t[iЍC7~?tnЍq4~?tnЍC7~?tnЍy?tn~C7~?tn_K)f֪idl:M  A&duAIE.WDRkn?Q[ݜImZ՜i*}gz=ZRV168(Oӑs!eTX {6JD@d(,d B\vJK6Id}22J>9$-z%tP{18+:ݢCo=C@e/Kb{r3p3?Gӑ,iCհ6Y>T..AqqULȃ@%$t2c̘(V)3hK7,IR[}[f,wު,c@032`Q$nsڳ'<F\299ϒ۲쀖 HfI sVTjhg]}~f+uJ–V1 qt<:k@">m* 9*Jp .p)Їf5MC^O߳V3) DX]ȝ6&娌`8Ez( UZL+_7U j'frI?{Ƒ@/I;T/g xa*1HlˋS=Mɣ[[TW_uuUҘ|[ @Y T0YiOd_L]9]]Ec4U=*P)E<&q ˰H c!cXzt4Vĥ&N zDQ0!S 0 "8f2Rʃp-ӲGo:ּ1qL 6q51ooApv\Yha}A2NI%U->H62\!-E1*8@Lj@sr׻dVK0kE( ѕqᇓc˥:S"xoo7,,EcD&kjV@s 0IA)cFpi{7e't>9W fEq)w)IV oC+?;MZEuLi}`|𓪒Ɇ_dKг/&+,ɬ4PN00$^긵&~5A'W?a8O\3(UV*w\'7#Y~-ϼ)NOg+i^[g};(U| yչO;/Bu-i\Rɰ˺fHm3rZ 
J?f?_y?uG.ʺA՟45wJ3_b0,vGk&?:ft|#o=ΘQ0ih.=IpcɈcI60ÎWvP`G F )~!AY-HёE"Qh]jdӉg'[)jlo9Rxt ԙe8CB񒰏ѯ >}) ;GO78*é%/7f:BWtr0L`} 7]J*Lܹ_JoW*g#)|90qQH&ޢVg$ 42p诓8P4 `m0%sFŸk_nkEtn9abfu+w(|/ECUg23G=WfeIr<`2vFMsw2TԫS79}'Oj+[rƱxQ*e3smC Pʍ"6J:2,"1rRREbSZKF0:lL!X|B?xp'ĮץOO!gԺG 11`Rns |P&{ӡI-~6N- \>wyп>kb ;#ok&u NTW>*LW?'-ܳdB}hKo'Ո 盩35ESÅEtۼqz{zRrcX@]}/6\^F핎`^bxղoXf2(Cux|k%v)[sH,53ShDIB&XBuڛQͿ)N顷lYf6-}ezKz_/"W>JMO㼾qE$]->ywbdy.%ؒ@e'>n;&Q!^M^I<}VPPXG-UPqֹ)hb;ٻA pv<+!q)7gS  =* o%"#²$, xxq.G?teW07mkNڝCik 4x.5t"*, xCрmF,[JL.0"~ І7 (7Pf 95ejM [_z iD.բWLXrwE7R>pMJ h83YM/k_r{5 [@ |+m{ۓ (d%ޔA}$R# 1U2$A\:ip8'Z*p>ZB^̴>tqe9ɀ\0s,zMP^6ߕ"kY"Mi= >{][^oFlrovqsHM wN- 'x2LkWՒ–FZqɶ%{"3|&~>ͫug!L{)c.jp8iΜ P2m-mh[Xl)x^bD q9P/7tMB&xmKOc] #zT9F$㑅h|0z0󌈈0+Cʘk]< Ck ru=x>YYmn= 9l -1,O\S~SҖ&oʁB㕢!Aփ-8[ghUldmv>tjb wUTsc%bDxmF'/[Cϭ||yuO*̋asXSMZ= gOG3woz_=,ZPT 1PqxO#@ǭ~Za;36Zl N;̃@BX2H'kl`a k*h8+ i s߂ZZfycmlv?{Ƒ_җKvMUKb6\*N&*W?%)KR~+CCrdQ*4ӃFwhրA@٨baT:.4iiE$uݸA{;&8.Yr<}}KA"t*#Fi&~t yEAh乳([̷}BL3jQóM6 Bj/mˮow# dKRB<1phMS32( 8E"e$wkc1  b@Fi*/b.V%[.a(,p gd>Hmu<ү&L\³x^:@8rRfn"Z(Ty\u5SxfMR-+uꍏ-ץpR4ےQM$*嬧ZRljs-JsIqK,_dI#XfSgٟſE/WVNXTSiNp۽Mͼz{xSI SĮWE 8[3c 7*~|f&C?7["CuӅ=&V YLꃿ-򯴨o_^C)T1]87 |#^Cp_ˌ3N8 vۓEY`--&*(p˟Q*׋J?}-~M;Ct67ҊƓR=|->0Jn*X3Ae=/͘״ 5A$?|Q-;AM;ܗ 6ܝP7W!5 ̅JF)4f029;w|+ %/|v}:eH8)Xf".zց%f)5H=͌¾zD.^av7>Rm6V|/w>eqp/EeV_>$"B6qf(D^͸4/(M:ƦbwC&hZ4|\+ 6uWa`G͋lДi=srғϴ>e{˖ 㫗g)s<-5E%g=1`fċ^Z3v?z~/HmG鞡Zm ,U..WE\R¥'/p0 0ce#"!Ma 9mf H<UXVl;c-HE0k1l5ͭͲiC{B1xԯ3-3sgpe4Vϵ1S~>R쀙r܅g9i0yLQw7 ǡ_~|u⁩\'٬t DsyrUtNss0^+sIK,9\:+ϣ$&R,1{%W{54׌"4'ߩ:)&b7wu!10}\, UsOĴ%aJUT⥑t_KQ7/A~O/.j]IʋLX;9Rc>`*4t?F_~z^gQx`'RlZg X8#S sDid`dž Z ޼23o40uig1ґP[424pNR$iyNy2ƚ{D4:zZ)(!y -ƀ M^b5Xa E s ^>#k(@BTs'$Z#LrU_}GkQRj{hXc/]d5ѻH_TIm2K{zHH)!tzrmܹ/1a#A29HTbb1V;&^I6t%WRDbrMf3h γlk\ ·cE떾X߇Ú M9V~Vi; }}/{i?{0| }L:1x+v΍Z`<[A;q܁a%# f?)$~< Z Cg>݀Ew;#Y|xJ J;T*Ugئ=t_`o(#aNQAg%=YӖN:F::S=zdP(0S##(D<K$td>xP1G EέRȰ,\怚kZ )$x/N> U;/k".Nv*CkB7Ny$ՇbI䗺Ȼт$sG|#|MgPAzOi!k$/)P Fϙ04e!ڱUM v%Bm?x*Y?d1!(B aҁO|4JZŴpLRqDAP&tA$B和; G HJ˼a h#0wHRJƙP)Z#c)6s]-mjz{BE}yҴ易ɻmG fhkcJ<_XC`yP P;$ۀ=H$Y vh2Q+z:$X[g@X;րA@٨baT:.Lswl@DB:&8.Yr<}}KA"9<%?)gC*A8BQy, ָ/̺ZlS !)CK)j۲B4{ ْԆO \6Z/zӑ{f\ˠڃF\DC)4XRN}(UTYb_oU%LQ 6Rq IƓVB FNS )K՟WQ)D+F̫ DPvc~P`7EoT$|H=E݉L*p;zoR#P~{i#@ t T&m:6.7oA*5}|Ů^v@*- 4FW/a&o57~D hl4J,Fqzit jJ \ajAңPup^օekn1mlY\Yo|l_pwlM9p\N}܋(:'" \PM-ArE)]Wz<)K,_dI#XfSgٟſE/WVNX봬}N0WK1Wo/?s[Pr|ph vN|>ԉ-p_}R ~oL 6ad4dY<- r.{q2~ZB,'ᰬB<= \Ix|Wx٬&<_C}p#&4gs}`R柼L1bW٫k BwE1^_?>W3N?{F!_.2@pXaXb h"K=x_[ezDl%nfWU,VMH@ƓwW~,}sxswXD|BL~N-,;FmZ՚, -6;B ]ԎE,z ΔZzIɯ²pS4['*^IZi棫tNj~[(Z%SH읚;xOd<|hxd We hWzwtFm~»豨i&vvQCe_w1`gN]׷QݻuC =|Cr F *bhTtKhxkr")) sa8{w|]鉅/|R E8o@?"^<^gLTIr>\41'ˌ!%S% XEZ)oy_+E3گ/o岽}?9z}:\>_wP֊u ipp4 }ڌJzڌnwxC`!ril0`&ȜK'e%ʰ'TUʞD`ܓQ087JsON3nwF9LK zEV $B,,r2Cp\)0fr9)sNz Led[=w")!~smɂ.W{tãy[Jm.]/V>Gt[IB u͍/@hls3mSf/uŝ66'{ ;vmIwkv.{^k&ww -~~9V2|Gx?vSob/[d9VW}kz񺯨 B; }rmAl -L"Ǔ6 [YvA;W7ǥɛW%:?ii*_HMnF1 F8OV#S24*'䢥0I%dYkv#gx}3_}\B.S$з=&m;6nW2~Ѡ ˠs,J~n _/OQ W;iowfh)48|2}Xg؜ *op3'<,~^+vzNwX]Jɷb:ɮF&HA7oWJfh=kQcu:>/[\>.Jļ_\}e<<睠bV",o%%q:ggbò 9~"2IbVQ8omM@`VV#颷hRJ +@ +5BF'e#o<$B@"=:R(x9j// '/*S!K #8ˍs V&^p` rB{+mAӦ>{S}Sp'GlOG1_bZZGk e\ZtAvEzg.*PQYy\FE暠k"DGOqęc)ck\ϟg+^瑾pk LI)8JH2]*թ,Uqsd M۔!^]aj|p\V4pڕ$^L]¼i[G}ozݻha@xO96P)nnm^dA#cn oݯs}W, ;VE\Lۺqz/>Ի$uq2nnbT'1YSF4 I΀l}F='i49JڲlMN @Ȝ;HA\[U47~~R_|k֮Z}c3t8g2ԿeЭ\]`׆xOH9i|`q)\gȰ$0uq t<(tֻLtl=BG# ,,-⬯@a+_mQAvEoYe19OAmbι0m _2%od0Q[;V2FkroP~΄=5rQ$ ʃhoP*䦔zOTIN3šL'(FhUgFxESԭݧ:۬O)cw1ȶͥ+N p0/Y贑GB䙋QY0Q׭̙ I@vu9!.%HxyDHH@H-UH 'j[P-2,jS=w ɡ݉dޮn^OEw{Ԣy^W. 
C)eaπ!@k:dic uRX*R\!S(Z ) N6g5sϤjkj֌J5]X3ԅ0t.ڀj rEVt$p Y,1&6D]!eX$Fn}qE1FjDUY#^#qk8=TY2'd7a \"$%aWeͷ6sG4%,g̀"ILD.z&BD#YBU,G*kjֈ.^u 8]V|8%_g5.9T/zQz׋8e.D/eqGU9 ;hF&0a ,ŽKz(zTa58TBe}x`TXŲX_6WUf[\BяBYV j*hkr} Mӄ] I'#\_*}g0zP.zd$11d%9\J-f4Z2A|8d6`,J. )@rC*(oJq!#Љt 2V#9/W7*^ņ֔1Ւi>.o?e-˭@hٮwTUGdQD ZFqKHM,T,+VFM%6 z|59N+af$6G&9oYeL 2&藗t\Q[WU"@WFx\"!\9^-5pױDCQ@k!"PS-P{j aKI9i+p2*-f4=ԪlI>Ff\:(lJQdL6Gk^ym0 U8()7mX'6~ᪿ'Axe:F{ ¡x{ZEL :O9 Q;+~!?z'sF*|-kc%Ȍ \@2 ,;Srg@r&Ƴ$z'+0vRqȡ*KiYXnơ&MۤY|$jtc$J $J2 6y`i_nUbpr~ckRQQ&pu9LjUR#;B3|QMxЙО0,餹Ռ#>:D㜎!({D.KRJ Qo Rj4 vPE8cvy5rL q5Yzކ|kףYH򥉃tPZ*V 2"s:H&"aMv!@ vYIh5^ MΧ%L:^IQAe ʫlY c$==2Jq+-C^PDMa/u1Qs9M1G=B-;ga(\K^B A5 'lkKv+hCbnS&8!+0Lo|z]|0>u,﴾LZ~<01$S 5}θ%c8:ag`2l:#%x}C,-Z+L17\rj ,>N.-srEn o۳p4+sn~YΟ>a?wx;"YE&z|v}s~\hQjŜŽZѐ?}48cգM9%j9G/{ϴh=7#}lu|v:8{;Fyxuqͭtws("Ve>k+.~GblHoIG48 YYgHƭ,XVhxݘWW-6*Qiԍji9K)kbObz>%#~`ܝ}ǺNN ?ήFA$v?}T~kmHЗI0]`,; f &25E'{!J%)ʖ%@쐗N_ o񻯑u8/]ik FOQ4U4[Rߡ\+rOk^xz'h2v7>OR;*4h7&.&TQ#] O(/~;f|U}*.2_z;qqolܵtFtM\z`W9[SI#w60\<.o}c_T]\aEnWN/{N&u}mԙ*dԖ=@$$ɲkR;"]^{QGG|׶ .Ӻ땓 T%xYq Hae)UZ(*[ͅ'Sd>SQڪ+npVg{5zDŋ֘/fVd[9i:rȣgp㛊8GO̬ngW?+ nfOnfѷkb1ͬV"!.:ˌJŒ]uI9v-98-:jtAK`,CB|.B^^7,y,_- [c s׏=p)jv4;d 93=9! =~hv لFxZ8 jlit gR贲l[Ga(\q]`L$5j_:b].a7Lʙm"S{vғ*+J!+ȶY%òBٮꀉ]ZTz-t8`ۓNJ,yzή~3w*>ǥU)}AD ,9b墽T&:oO&$xbs]оymsV2;v0pG1\t9N!lZߤ0>?d)S_,b&ITJ2R6-o{.2}7ޫnpl^hTbN!VCoWU] H:̞ =qfe;_a6:HbF>+TR|;uI8HFա/+ʜDqw(=)[.5Me>W&*W޼`#Oҫ {=ڄfprNⰷy])oCL`fuՏW&'4} a3}2ӇZa~8}%ˮGn$~=9V,k'L߾C?LtqthaL3Z46 9Ahv}<.9Q!}/o38h,.H:;Ѵ%-Cwm7^ؗt1.26$ IV={1䃊i?9/G׈oi/"ܶdd "S{>⣮=k oZueJtO`MZtL-;ox%W[]*ZhFg^?Vm/<)h pK/%w}w Гɯ' ;{MN.prt8zek鲥>_img-G~mmŮ}/$ Yu^>{MMi$t?x=ކchuXl5U9f%[ xA{Xׂ8v"3'OO ~Rf_}AQ=zT[껹ݶO/VMlę\~ka qs~uh\ݮd7Vҕ\\;Okb?Wł9/ߨCY|`LOwF bPo h9If;)c}0H@]2.%ެG&!%;Fʋ?z,juWKwȡO?wz֤OhWun>L)tG;`.9&5ŷkW Vzz;vk=yZnط++n>?t]^1 CZvJ}*+}޿IK Gޥψ3_:6q'C,>O\>OZʧf++q#_qP3Lv;gہzt|ܥv74^-o*ɶGnN{ī7 ✪ӗ4'ս _wV#J`_.6KpR۰tCNIs׮/ܷgRHF^뎘]=sz%pfpEO;kkKj#@QF\񫥕rޣYzn:a?am>r|rKgrMf݀07E_dW] JIj!TlJz6JBQJFWDA1rζo~/v>7I;V=+.v<6zښƇX:>:\F@IOde+Zl2M뙴l^9u]"+JdBRJ,RKmCYdUx0W+ i߶{&T&?keHHfHkm)xtEG^R1't'BVR5S1xFizE::r[椝tK#TV ,,gYun|sN^P#&Z r6%U4oǬ:Q1ZscIk!c5ţ ̞[}*SphmlrBT&BI1VtUr K`s1t4Jku&_m`kPg@KJm o L0'hW8rYɤmk(c>g;bPf Ԣ*%V62JZpN(EUމj @z Z%y7$&O5`)(Sp1/ZJD 萚D֛C;ıkEvF~d/ (%k<6֔(dP&Reڈ*7)@(h E"_ :[5=cq1K0*86YkIiX5Ȇ9ED ehasEΡd0(dJRx/@;;UK5จTuVBBTpe,}ldBA˽h`ÓL~HcV# cvHEP"R4eA"IS2zU|Jb|Dʌ JC3C@| 4VHH@ Lh-W&SfHKUFA ̈́Ks$.lS-AI14;Doƫ "3)_ɀ'J ڛKhΨD\ ̑ 0 ,ڲ,,K @+dr RN]TlpХWރ.PDSIلhFd0ȋE/ZVDr`"JQq Z,v+upVc+:c"&Lg[`q[lY6bV#9)p */VFNDB:}7fǺi²`æv_ Ny. 
rS405n<&C8dglB|BBG1 yPr $ 22#ʄ(|]\ e:o,+Ł6S ^\D,/((nUj ě@q+2qm2VL",Y_},Ӣm)1tYI ' O7Vzm{{]\P;^9TDD,ʈkC4Iw`dC,Š:"J t*:0B֨52 lwp)i,#m6(Zю*!wp d`:jxrI'#nHzPΨFZQNKFWbgZ{8_!vf-A G&a'8AuU1MjHJͧ%"%2o]}֩STe.9iQYa ;j|C!N"1zg pHce~ PڰZĤ=N0)F=fjR7XG@YjjPqI<(b 3t 46O@ײY#9}>|57P&q]KU8ZL(jx7Xv@J&Zj/= kߟ֠)a89>̒!p}Xf:@K c_F |IN Yj^ȈPnBNr)\@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DNu)gU+ p+ h;:E'W"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN rHlIN ,Y9N [y''I:,{@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DNuA$''cMqZ;B@0(CN r@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@"'8CܛZoūً]y3~P s{q|== YI%:q jVq y㒗02.qV[on9;!Z#NWz+DŽj׳]{0J]WcT#zΡ抸 x ~s-{#߃8X~/s|SAz`pe (~k,bKWnQĮ,R="|$ =L)F"\K/(%¯G=^3&-WONWv =#~0Rwt%Yf]At,5t(#:Erh'^5K_ m^0`0 fo77w8.'J}-ҽ0|o&9Y+.*zBKw.=g?4+$N"4W\a<»Awr'o<~MvқGLl2 ksߠ{9"KEuEeBΪQɆyf5խ>_z|:F+^wK`wiBe S;yv|=L^`?s3$ ycnQ@r_S˯R=?qFˑB-oS[^op49m&P 2'o3 zFȲ4րk s{_;Nq)҂'}翷bwsg2e}nucEGd?[vz% ΘITlL1ZWDE^v]J$I VB֨XtJqlIt(ծt(':AJS x9A+QWt%:E2IVR6CW #t(":A$(ڻb Ṣv\Y9EBϒӟY ^dayeZyUzTJ<#p sq1a\VZUհyTn[汮 ^RVUtN=PzIop/E!ZT`D-^r)KJ C*ULBWVخT4WzJf< ?: xbuuZDoCi; UЕ"zh*FDWX1W ]!\^ ]!ZiNWR9S+F/}s$מݾ[H$ 1!iٻDM MK%bCWWR >J)uI`˙\E{]!ZNWR]"]i뷤W ؗz'Ce Q Atute +l+ut(==:E{`E1tpyZGR+ĦyS='xF6b6:,LcS L9/*  D;?EF(㄰)T.F\X)  5tFB=A*-ײ$1*/t%-L0 ҕ W]bCWWR juB][8U]`ʡ+u)th:]!J/NB ^ ]!\[d:uB]"]Y',I]!`'++X1th:]!J鉮NZ(VzzGF6;!)x0eېܶv|]ٺYdR!+0F7Ly㜳49,75u,\R.G6h7`o:@^_%L =O.WP U5p/!ҬJ x,/_u~ 7pNV*C-*DV+}x?%K7; [T?N%/Vbf~/QjVihy[}GU 9:-pח+|0B*]yVQX?x6 ƧLڞz=`q s|6%r3?媜WKTSˏjN%tp5`ҧ!܄"szq;sr39ĸlP*\ *8 A57qP^O p/[Z#ssG0ɩ́a8D[/7p)"8@?_*²ף+,  ͮ@Q }ouz gS,h6nv'{o~Ի$ |g;z6sB3w 0W 3}G3ݱfTmH ߏ zTSKWՍbFquskC$z&:Tz8|vjp)j·Ȯ-hkoezŋ?xd<|03ᮊhG/(O3BC_sLJ6I'37QT;Á,ϒ5ˑzz:uci?!P  Qtݠ #c8vAw/8ɳmv,ZlN44A`utڇ\WpqZ.%.L7 K_d[e5(l՘ZET2U)[hO.Y5w/{`oY);%m=d ֻ 4p^ ?5:Lo3w-N6Ae9ax9L_hin.*>tOr?6*||k]Cu W!ػ r@E}kex8o]G,?>vU[{lxhcdʃzd8ul| Mm GdE8o[xZn*b(+z!%Xu/+&EKj#~s1Czx߱p/WVUkwDovaM7Md )P⺗#Bh^`&#O m/{v1N~=Nȋ099F5Ч^4+OH?_$9잜W;:jg{:W!yR߇c8u0lg0BTle!մ(,|_ulct'}DP|DQQp!*!E+ |WYb@," @f %3xC-@rR!21Չp.yg+*#g0F6`2Wigٍn0|(0,\ߢ J_[>nnw^O>[K)hſCZL]u"V\ Rs2PUӺIv+gƹvyָig\0]|24r燃gۄ3C{瞺) 5|k9m>x]vᷫ5%8Z=Qa"Z2dXnk"K䣸ۣvGhw.X9Vc5. 
ibp!ƂI}ŚbY\:\)Q'oMo{Dsė-"0ՙ*z#H0$?]MAe7tWjS]^<䯸K^q.-NA0SQBQ:&'ZqNy8q1\ۓ \ڪ6Y w-B.6XDXN.ns|Jn 釳6;8n`TW麟5M,CN?mGY™}}j%˗*FdaVIت5h TMm^q[aB-q>`nԹ}:Ihrae)=asqG5u*4^__ ;ߝ}曷^ߞeo޽}Οp&pc8YIz4 ?=EAZFl3.o1&%yŸKqz ha n>o㐃opNO75ҕ bwKEF']K*Db-B )BQ2 z2;.euԣ$, N}c~`Ixq I֫ ڲ4֏kRc:'^FABj,.ƒB3i())b >uU;D Ɂ')dghN_(pБ@h"e dp64"*@PE*D1)ĢcOLqkףjxz"+Cߪ ;˓yָ3d-^3'Qy*MWEՋƝ*7y"'TnNEl qӇs[ E.8?~vv{* Y|̡!AXQ(u dpnJ@+S"j@+&GhOSOP~>}'^Aj稳^xãQM"aGNۜ{vȥs"IR+$J")!d`$j1!15gZiS٬$fof8A⳧c1TRNqT1HdJ75j̔U)iABeaP*JL2P`Zc qj_v0tv C.gSOT摞C$Jhi .CtRITQg)pok:%*EKZyzx~U卹^0ן/`LJ\Z{].{/`Л`1B)Pz-ލ\>7 L鬒9a묶ޓ_JyVW jS V`ElmވTHˡL|6Q.pgGw1btR**ku3`epلtd3 ՚1Ge!@\\+x\ $^{Xb]R嘢)LW!KI!{!bhe]Ȩ1.t~L V9K%s3ZWԒ,!YZC܉RU5Q kjsbDP_$e kNΚBQuXZr> ݔ?B-XNV0m"$JŽiď7<2)e#і$`T H6̡,IXEAQD51tB)r6/T8Վ稖n}C6/';VbKRyr]^(Y)**5y't ,\m0ά3$4x<:qRQɨ$R)cb┡A(1Y}Gp:VQ*K(JKbltr7JqƎѣ,ܫ,\:>ɚr[JߎC{j|~'_Ķe`|*BD :My c\*0< \d,MtP+51#^N $,RDڔ(Ef퇃81Ej]6/,Qj5u㨞hN,h9  NAMKՓKĨx6Ie([(I]F5C*E\ G٪1HjЮp1r6猯~,E"]%(,Q"%:7=*nbXA<>rjptVjPKJ16)MyԂ,g|P`B@$̈́NSrTaX(UYKv\Gx8xʞAyP*S`sQPWhhrq/rPa1UpG'=in}oȭ*r1svն Dޏj{?ZyR9M2Y~*gEQX}œB_\wclp !(p 99(G4Z"]WBPQ9J^-A4a)^ /ҜfICich1rptBz@>K?tZsQCٔz\qĝ7{JYZ@Z*i1i@0-71!2aHUJN Ǖ[lVmR/o٩wDpDH.p90#O$J"  _Po7$Y?m帝T>$N*z"KQZCZ@NCZv (rd[;*S -AOTGVQRDtnQfvޜEZP m3%bl2 dR m{58F21q2&\Tr˜&i_;{'jet("آZ24T#P8e$_UNWAtv<']T|HFf圌l 6$lA \uuc2L1qtq{՟N|%ICM|i:zW#X)ŢжE-Yk1#2\St\ cigDDdkA>Q"u6F`qR)Wke1YѨjs nmCrYe3G?c{juo1n&ۓqad8 z~@塜'$L&OlBӋ:Z->%Ύ֌Ab=Xf YKI~*8ŵ[ZeދZ1zH&Gb @uk+0 g$^[WMF\U ]لw{1CT: erL< Ψ"d%=>U1J{k%} 4~v˃U>trO*HWy;_||AlǹXZ w0q|>-&H/gbd^H֞221-5;g+\>7_o.͛mR )"[ROQcMX>Rg5o]ͯwz4Zݖ{k.#e2ѡ2k>هdv&׵\ƌgwK{}}eۧ_}98ْj~CVU="^&}pX,f$nr=h4qzCd (Pw@i u* m?2S@*Wl-ӣ>D/j8X~x+ל8ᨯGݨHw=_X\ʚP֝(v@5Е~=>^<3е9NH뉐X쎴O\mx8=2 48ߟv\⺽ă $K1sY3zx0jߒwzc,ssrI9+@Ȋq* 2Dt4mlƚ((V;R+\!ôWm7lMa)+1W:/vpqAA zPuMV7\DMr9 YېR6*u ښzTQؔe6w2Ř,9O9 ]r.PeGFu_1o2oOL+2y {jvPowYFz?2q'rmwŏ/Z h@ufU@JQ+?R=XXcq!T8XjQZKhflojȂTRhOgk :ej.H9uFECK63J&(Ώ 78uqϢ0}lPHbz7 {yv5ƾN@&Hmhj"d#Spb\V=c5{w&=2BRRj XIܦUp GZE9RH*a)6*.6*W/l8Xr2 tH1i5Qj%+2UL8cpw~I]1,?tJWš ] QDŽ:L. `4آ\S7@6"p1:;Sy_-gKM7h3hiB{)Jݻ;Xi6nsqMZ'7 iIKs:j݌ ]S^,ZQekĂO/_Q_IBBvT 'p 9~={ynPć)_?秳/x[/=tq&/+#k>#9hT*,ꨭV5jLjе mY%R"d!:9kӶ1!Y./vx N3*rz-Zga],3Wzէ@}|uu֛!@JJ+^ q PHUFT2pϵ57Ÿ(S9`bJVNM{).ޯt`~QЕ6ߝ^]sx6Zmv^tgbS!a`ZҺ[PikQ%&^\<)V 5r%hC`,S .1q"G <X5-ܺs߈;^gu:Źgk_vs }qn n,  N=o8< >o<<ۤ2E}/({w,W Wǃ lXP_?:Ŀj4wu቉|_ڻaD֍C`.cp1,QȵJuHT=BX[g,KRIRrbNcV5UjYsj \̞s׭.{zGIE7)G'Gg?hB[{Š)8!3fN%JVƣONbڋԐ\$b$hZ>m0 1$#LM13 hDGG 6w*.<$Oq!V$ҭ|n+WJo!u"/( 8 ]02EkQɶwTZZJ xlgĹvW`fenQT9/e6S"6&s wem$I~]`J#2`,ucva"/cRmy1}#D%d]~2*+28|R B*WDAO(`|@KSA3W3Ggu:.b&娬dibZ>vP5I;lwIvAvlA%G@QsoK@ ͑`gUњIgG+c/e۰5XkpѓTBJMʵLY1¡%P۞#kM40B1LP 0SmzDQJmlˋoMf,Pjv=̳lY4GcS*b E-T6skp-=:AE4%U}I5y{@ġ`=Him=dlP)@]AXy}eQW9/E<(  V)LWyDUiI@;b{H7Kthf_>D/___=T .`D(JWѐ?|5sO6J\8+\F'PjԼ-f?kwo3oΦgK!sAl2G"ȇop8AƚFrq$PG:oF4Cg0w-Ę,X4pp3#/giUGdӨjqIxM5RV>1ɯ+c~{{rlPC5''{yF7?|?N'oɻIf*R G$Im#4ۀCϛbhhkho6ЦͷN +7d[N ~zICJ6hS6n&Y^Bd2?a{aܣ3o5_8!M~nW^(&I %pQsCI$KY*LPPB x!s)e9W9k3vqɼCKt1&RȄN$#8avmB[nIt0dMLiecb[o$xgtK\0' TG)*e}]:-5I*ʡSO+ bYE҇mk_3F5u6~{A@Wlg:K&RY0h_W#ۭgGh(흢u%Ƒ945sQ3e )I9+?nr-rI7T~Ѷ].]P"k㺨w˩mMvYp?֪6lWГʱCG{BG( 8_~Z@VJJE*Whr"! 
@[ls2W:PDst(z-N8gPk˵oe47,2EM)TMBJA:Le|J.PA@(YkCRhd-y$7 {LgJ , {Wt]QH'Tzw$ײ>}axiذXhw3!}]T\}'gk+l5뤅{,hxHc}<*=,竅4C#9KVy‹8N4|[z xHO KL-9;mw`éJdPkύDbT0Ƃ <@׬J`r!F0)ȣ`΁h΀B+>3"TjK(CδyڙF2qtІRpyŽˋ6bOY>|nEYoX*8dRb^1x 2AcRNHn9N]SoS ^ ]K,؅%odX-'}7Ȭ&i_IDj`İS{O*m*`(*LL0t)ݙ2 C&Z3hi^J9UZyZ8$r'c$x2 ynz4 &3BtK}`V#YNmw/oj'rXkɼDHw^՝ӏ;E&nqSk/a軨6.Q6cb:'Un;okT7o!dl(+*}$%3eXWѹۍ ]$$LI ef 9(ǜT^ۜPEⱬRnӫf^Q#5<91%k⁦3 ZH0E[Ɲ4́I5{ApSM0ZDeF F1|)iX:BǮ8D{UBnJ١ "lIlj-,gdGgIUz`&dI(K-AXE:Ц2n3RU,QOYaM%DUS-%kCc1IZP eQ 2il)%y17P '$D%;56JZ~+>Z;:w܁r ]<Ox{ Q,_ },Nsj6(7z1K^LSѧC?FIӭ_IV+ן~،Z0ۥv,NHyuQS$(=Oj@joًO?OktNZEIM$}Z6ҹe+tz3I dY!5\ˆ}_&_չ/_NZgD}$a $"zҏW{t}5{zčsRKlԔ"z?=pi*>I#xK0)9<rF7*$*,V(U]o$oΊ-Z.Gtэdfh24хGUig-{T.OU9:F,^ JJ+@+1W%Ks t8ꛪH}߫CYZGu  g*i i}au7˓Tʀ e D< JK!b:l)L!‹ך-EK9dZ)1c 13MQ;kI) ,1y͸Ȝj|i` TK|9o׊m#UFnrD'1; ,pھ1Q7j&`vamL=0$'~Uaq4`0Tg-1(t% G/*K \glvUYy1ie@l@Q XFl jeP@b]3V9ޗBTڭn|G_g^>QipVwaQsg]wJ>{><=i@l+UX\&UAT7n=J{`;h*~s>ȩ FyD-j2GpdNFS]T6j `Q;8_Ug5 J AdK2E3Z-8"@pMЁdIHwp: fۑ9UoV$IggqLH>=Tq +4":[Mxn;<Y'whWގ<|10CYlݢ/A*trE{IvĎ%ۉLbSԗ"ÇIBJ`艗@iaH$JߕCuB )]gn&z'yZĐW<ǛE?w`__rP7b|2_;~lu,Zp3>Jm(K eƛ;ӿ`NDO؅'Y2C! 6-p'7KMVe0dZW[Z\d@oyvZ16u}auhMg '{bhVM>MDFlѷֳ_WqW/K?GA?Nfɬ}mr[KZpG~L\*_=^zP+R9ƒ*Չhy?[zT57K $W˵ӽm{'âXE0Doky\w:M|3<,/O(=f=yO|%]7Smnw YR6d>CrSoޛdiv񵬚YYu>q_aEsZ{"NnC?G][rP00+Dfk Ӊ1TX G` eJ:O2Y(GOCq8ځי&L:2sTHr!4`AV@=+C~ǧnus}$N=Y۸%}ȏ^].c:;]{Vx#z1!!m8϶(esmz\>`T 'X䩊MNȳ"l12t609 -8F.qB4Kr+ ɵb%&S.#;"%:yx!NɂbRCsy*(O,ISKp[]P M4<2U:eĖ\&^o٣CMjq[:}6׷1*r{߷pGҗ.څ'O4Y재p׫2S?* B}%8 2fƟY1u\>LOɇOg(ħ\Ht{XOvݙ|Vogݎ<(ַ7uI4t۩ӏ=ezcw-56g36)+>_+#  8*h.(~xvQ%ysѱM?(&?iۣb]s;N.5/|rj׻ R4InTeeD6pB_~V;81^G/ՠU=rR:c[7:~;^w%ArzDyr 2WQLo.' 6.kV7ٲn}wQe|,ӨEa،"hf]/柀ˋW"V6\V{+˨/BWJp5@\i K Pb >}|ѩk ujKIh=?C3)Q9U(ycZ MTٷiQ"F! i Rz+qZ MP)B+۲(an͹Luی< =גn*dv :2Dp) {BZ!+T)Uqe KJ&*z,Xċ'L/74fTl~u5a5Igݬ?vg>SWAI?UXo҅Os"ϳɿntmdU~w`)6-XD(]7i>UtVw#aU<_Gx}%6,3 iN%_2sQي.~QZNTB\ZrLET$)p6e<[LW6zD4q.Ӝ]d#% ɰOyMFf(z[F?׵fgtS]< _U!4ҧ5zc\͈2wU`5Lz+qi< WVPw\Jp5D\ ?Pr7@P˨;P%'WĕVS+|U Q=P1Wĕ2Lx챶2 P*WCĕ6{q9 r P콫U" aF3_T.KmlR+y簄Ճ֩){}^bZCқKTkz?V!ֲfxqGaa(z+TIq* Rp%HdD_Mdg (Wwێj4qQi*m!UOeBl*Q;j,Rb:.Ŷ[AJ3Q{騲60Pz8h@\\%|r3C< * Pu*'R\ W(|P5 r|*oV R{r`Pf*Dhӥ*#O5?=,ޏUQ c!7` s)ތUQ}\}1b-TWֺ=fԲ3M\vS{f]bWV=ՊI@0W(r_pj;PWCĕ%{Q. 2w\J+aKW XS J\Z{Ti+i0v#\`~׺\Z+T)u5D\)c" kur7BW<>j8Җ*uW(_pj}zAS\Ty}՛7V\hW͗e(~ S>l8Z \xΟ3?/M2BćoXOn[6,@Atq>g'1"1k!aRS&Las&M2ZCBO?1*j'}^bEB3B,7#>%Gǭ!0l}Il,&Lqrksg()lYӞ+939UusT8Jn i!ZOr4?qts):o ד?)ݪJB*7eBB.ڬl j>ק~ NǩX4iЁ5 }P cnN-K w䡊E;ꭢ͙!f-DzϽ#BMTDzo%[Z`$MԵA3Z Ct%'+>'ѢkMcn޾>1pJk: kg 끒rWj9в' 401ИU7T3F+PtRU=QKʇ< h`D6dڦ-M݃666ʒ&dK*0&B11ИU6F 26{WߨGJ<D#@#.DO$򗾽8˵sU 1Wmh^u*L%;KT}7D> R9 A:^yr}4xNuuhaST{Hdt=zs|XvIkNֹ[ʱ'Ws̘HHI?̷2Sh1$7'XK!Q,KBJcuߺᳮ/-@M|^YbHuEQCBNFG6Tق1K(#mtAڳZaB BQ:K+z o'0L'mTȗBф i'S BAnQxi h**u(:ݡ-!xy9xi@yۧGta0P߸+e$Xtypl5M);:+BfCLȌ,Hw0`T$XҼ1 [(9ԠMEX m {Ze3@P:);O-`׬jX;M*ĬeBGkl}'I9G썬JH]鬇-6AeU26HQc9MF2mv^Eo{jPB]:=n +Z#q2FM[5[zNB€(!2] 4HAjTy>:LAZ 3(RAUvNZ'$̿2!`xΧLgW~eu8'ZV [ŬΎ#D m3bka&a=Cw /M TxU2GH}1[Zm\U&1:䉞 a (hV( {By[ɐ$RQd"5B5 C04X yy{:%+&CFPr#VH܁m CKEUfUSYߨFbΩN&dPJii&ALv|3|s,."O֮ TG<)} ]{ #Aˈ|uC.MAy1k"! 
5y]%@_!80f;@]Ii(ʠv0FϚ%\)䌊 Y)֎ a<Qy@1 bu5;XfՌ.#8Xx;:B5^Q0 td+!x݆i63+I@̴LZU ה!P?Amj DQ;Pf`8 衲BXkY!5R]9QSTZ(U/h?V =i  j@eV3 o=JP)FC;T*"1 2߬n$l& -/m6ZY)1ZoS@4؀q-nzzuzwtso9sem x@^,4zV6 `-> U-iVn6fY#e-6n('g= @zVF6k80)QC^"$u-ҐQUr#as":Y'%\aۊLv0X$SSAv =>>kb1 ~+AZ964NB0Cʟu7(oV1Z8R.|.*jJ':Tiᦳ^:H9-й ᧀFSaМ6nmVܢhZHk֬UVm(|Ϥ@LQX BVcږtz] %7\sA-4p^ kJ (Bu(AdJ wbdzl?-Д nFГ5>dNҳ֞fJOQ!,yJi]0d@b~pQi4 65fs˥ZpU[.!b!c-ٱP짨*yPbٹ:ՌH첡wwW,"$R cV/-vp7 By:bdVk3v \Cq͛{~s-k򡬓f=xY3o[GޛK=KզKh&xG{yeofa}t`@w2qnsŕ~n/N^&TOD+VW|_wn=YT:nPVvz׷Sqj@ͪ:{]1,4 cg胔b4w%jWq $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@:^'U&%95Yp]*RHFy:"' $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@:b'RwKr9c,- .. :F'SqH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 *%98ׅ86;28 8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@pk}>śsjʭm7l ޮWus}} $e\`r1.1.%tƥķsŸ1.OzR%(Q}O\{ܠǡ/|{zteAWVC;1xOu<.ynBy'ߝݷ"v?`u܉տ)YV(,~f;@OH >DGdo?!ˡilbh] M3Z塩J?R&[CW תpt(:B"e7 +&mCW ,fQҕSNY ba9 Íz)th@e,+ޚT p ˹pcX ]mR_ M*h$%+tRWW2:Fb񽃲g[j7쫫|] ^lx9?^U6T|mod|۷j2|׫ˑ^<s=vzmsϽվvlh8h~.7)/u7Scnzw^7VW^\৛ fKMISͷ߼~M+nL7k*6sﴶPPNg gT^9Yx7-8]KY [)8ZMv=av"mM?vظpbm4.Ԫ(v,Ven9tpbmTNW;IOCWCO/= \/qhǡ<4=HCuJ^/4nCW טܕQ:-tute}oyJ 'ּԞϨ Nz>]M%f~1&}4 Ihi"kDW 8-GUS~)th9tb]!] -i͎/όUK+FWOWH|t0tZ]V˹pb NW)#+orL'zOyb/jko+K>s{7߀`w_;zzO$؟io~_on}9#xz?xy[~_~۷Wwt_3՝e>5a`uu2s?{w+NCw/tτgg_g-7_̉ sw8~{ >ٹ @03` ˂]ƔZ"(ը\QΪr+Q1*~YGn@)o=7'_"!: e{*&y [)O"PS>|-?S{j& N)eKI9)+r#lÈiE+Ҝ'U jI'Nǿ.Wra3E;ӧV_M+ENA:fS\%-dt5KI ɑ`UHb"A<)*cGvMcޘs0f31>ɞ.ѬD;}d2c1Ws>0C:攜 @2&9e"ڳOQO<`ëcjrt {;=Ҵ4GHDewi7nz(n^˥8n%#gvpmjInSGQs[>MPLUO6ibUl4Z (1oQ&ʠɤQP (t&]Dcwrٹw+:]8HW.Լy۶Ii31 o"G;#t f4K*)Iv jT \(9KBR Qo B( r,& 'ǒkфW(`Iq R %wނlJhWz9Q . ut1A[hF~2YeSF#7Fh I5䌒DaH 21p ޔQIXׄuȌ1P49uBIܞld!H2;xbar>۟4#H:8(<|X%Ff%YcQFA\¢e(Ɲ$HCb=$QB8v@:m5D)FI`&'XM[VR{F?+0va~~}%L e#~4 pߒ70v.p~lra&Gߗ0||W6!6UiyFM &vn9%5.XrJpq`@슝]-xcxBJ=ZKLV |d 3˪37KfN,M74!nvݻE8h{跋'L*2EǛΕolhf84kD^OW竗\qFIo|zwhGЋkdBpn {ژfOw0#}lu?Ox}=]x 6C|ھ^r:-v%PD6(śK?XLnmizƞS[7|k7fznVĄah,K>n=^w' ΀+g:^Z1et}4lǤrD :ewشQu_xoƺN^];?7}w޽w7/?& F2 g[IЏ&G#]Z]S󮅕tmjz6{kX!ow%m7.lk+@Bt }!]/?eii5+U u+D6yqk5R[TӰ|Zb CŅJW/qc# ~rğ4]ToQhQI0kiJ j3%wt Kp\)% W@嬙c(=wamjqG51p\w`ٵ%˳ `2QEܐbr0 ᴰSearW|tk%si:0L-tKeCk]l IMxRyk)jZM*>'5;i{اQ,toR̞"{llNxuzP!=LGlӌ?.p.8*MYd3'e}> $}uϳ@ K4D&C9dA_ %l)ʘNN LTP'U#g7Nh%ftQAZ1e\6qz;.+-iwR <Nk+}Xzڎ_8c\gBmCwخ^V`|޾ܿYU@VJ5FhɹIx\2N)2-ϖ^V|DsDG'JG'>rq>h4FZ[rҬ]ds0,1zH&'.:JB(IJRX%%,RE"TgrbF&2n4W rsC~ՠדɏ6P['p'BHs' [~M/5ӣț+@(k.{}Q* ̍5AdxD  }L n;g> s[됽ryXuHLV9j4YB!tIJEpI&$"Yh%F=4qymٴз@믗Yp K)gPG8Lb?MHخu:ߦInu}oEBmF帇eWr?d xn1uѾ6i;O,hx@}}>+7Y7k`%R\ .ʎ(lV{X{!lBU$VY.޺ }Fs4}Q2)˲1ð,?'UʥG)eyp&yge&V9)Ik),F,1R=MR i%GO4MLGfU4V#gi@T*с4:BzNEoDG6rу1#}a5rvKFh8m.X [|Ѽǯ8y/Y d…\羔D}셅 ܜ dy{t|ifq/VeFH}1 8V0$F)D/{/n-Ŀ~K"hF`$ĸ 3=y\Ƚvfm1E٤{3Q-CLA)Hg3DN3;tV{FΞ{U+>?}9c6pe78,+DtJE&BrZfN>QTuKdrh"g忄G( )%W@z$c7L"gzv y8*横zT㓃6*ZcrLF70[N3!zmq':ts^`1Y{sc?#{ .\PT6.xKa68y7OzÏxklbBkLY{21aA!(.֤ TUeE4 3A*@6Kis-p+%CtH^w"gAZq1EkWCIk}O !"'O`xGI$mK0BźՀ` ]P A,85G,'@3RVEr),٭[:&,X4b58T#Q4I#|O)d2ZK>dbH, ,6 Z:CРnR6|c$虐B5j0mIsi7 cYY#V#gFxq3cuVCI/}"D"g!GNxuRȸ3VkR'$zXa58T'Pamo6r*G %gmi$IpdJV d'WϿ I 2mI* Fst* O'.˨Ed&/g((˞J}^5rvCG҉Wrf|B7yuq,r\czxX,w,~EYc{ *DžٻD%WҬ>#m Vx|!c #*PzS4E;q{ŃA\.Ǹ$AQ4WS8!,%eJƞ5䘍PLG| ԀUzet֣*d"MΨxj-qȫSr.me +ylqv8 7PaΚ|ܿ*mxvþPbz}Lh -oXhY >/*cXmP ! +sHM)yX-ӘvRuR̚ NɃḐFwZZb$ TД IǼJ^O$%hW⚶)I"i%YD"m% w0|].ro%Udr? 
Mwf4XH`z34RJ:%V,ēJs@L%D%c# *HRRR}2综yJb0DUhK!q4GܱRPėPX@h`\X#IuRI.P˖y+C8ft%lT;o-)^W(f_ǡ_c]Zc4" NEyQd#\L&A{PAz0(3J:ƦYkcam5i[Ի4riOz]< epnᦨ* A(Qy}Bp (1IL>++X%Vr0АKȽ;Ǎ4EcBM܃  ڨ<QJOuTIks/>;y 2.:pe]ܺ*չT*&\T(+R4G?}9_s 1PuBn0/PkGQQPcH߽~x00:)b (WMeEe f7û;VT W7x9p)8ΪºR L}Ț%1("@E*D [4dϼs0Q )}ZanHo]~z[ewڲۧ]o>2mF 3mW7ޞ==,cL>uq>BҍF,~kABVW09 j]9>R'YpgݾIעEE$KWߔ9# *95F.ژDԹXLj2.7C;_7hj!h"#`*Qfs3DbKĶVٚ8{lHT+p~rw3 )$KB|6߹K"/Gtⵣ†`?በ45XФhM1ʽ Qie啃McNZ!:Yj]u\b_̫"j;ːttKln[YJ{u?]dr L'`' ._r)'|VJpe'6KExya!|8t\gigi}2 yWmAR6к躭\:y-h|JS:|Psqjx7{p#qh,?\Z:*1; 5a43sLR)rQ:AIMs MN$mB?U 8!5c9- \PJ8.p $I[4J")Q@=hļô҄m|>F.mì&mmx1Ùy7ߝHa843]w6.6rۼ ڙ,5i_ꔱ#s=G$ IIBS,DTXs+=9QdsG׆(f9'øCqJX0,I3N`ur,JA$#7Qabq{԰ERK,n|".dl^0L$XN{HtN~b4/Ryo̖#OnGU8 0.C3}uq8/90en']x5z d2j{ծ]S;O*fw.w}!{r;1߄`7U^n@_#4{ɾ/S=zh*Gk~Sa'T(|R ^㦊,9P4udcJC'X%?JTT/}M5%_ifJ|5fc ݟ@{ek:́{*+Y+2M*byyuE965u.+OK$_ig`5b.?<𲜁yqrO(Wb|Q3_<0B ^r]e-ak/근o2@*rH1VQ1XAv?p^#wr{"q6~״=u(oykv@w=}b he Gta* m9Gc4W4sGx*co|Eܘ{\ '뫛8.{f+ voNTq_]Im<.qjS3E=_jzD-P%Km]xd</7'V7W)+콐L2z;[n)?_~ܢy\4QAٷ5:moH^)GTԁErj*ڱx2kbMkp.xbxXx檡RʰLo5fU*uny¯[s]7 цwC\A/> >ZQ S Dq-0팡²(Ę-0GIݻ[9Uu,|іb(Z{sz2xx4QADN8/PA<)-DȑzV8C{nv@/D|͍5_.ۗzeۻ cUI>F8m&RO{?Wu`[]Ua #FP^(eeP$KhGE#P$$jbb9Ulz_ J&fZ>罥BecFEp.yg+jYmM=WܥCU|LGY;}n1*rW=*Ѯv36z޿ݼoҝ^GBn -w.+B}%m2 _.]T[<71ju qڹy0LԷxd{8k˳|Gd?qJakj.(cG][ӷo/`-U˭dƅ:mMh׳i-5w,$V](x<K$=S,>ʓG[_ظ.|OFCj-WHrg]1ݺrv]6ؚ6q{TD &eJ,Lʈ[&!Jgbx3\t :OF5ɲMM0 ]P,2?-ͥOZi(e}oϟ*=^'"'gkyYRrtϓv,ݣ|\z*(ˁ,!WYZyB)5=\ET_`Wx&YwS(]"VZFl5` $WݻjDM,(2R'γfB/\ LgqO |4JK):LR0a* ,UWK,<\e) •^V* UWK,]+Q+i$ fr*KUVt^RJ+e5tĠxb*.D4Kki ?}p-_\嬣/lk b|WYZfWYJo a>hWb//}0s'/)y?ﱣV e0Ah'J ]'9lNᦘ(TB !(ӻQzF~x.Jp-^ ,͗X`v3,3<7(ɮ+]]4u_\ yp 9jIE*XX-EaNe#QkucXT{,f4Ix ]D|6ߌ>ݽaA.s||zvL6Fklp q,5D[KD@frq+ѭҎ5|r-W8xKeL3R)ͩZkMݗ83voégWvU_ %oxVj^*vM6dWlʏvW*MqY> " \K(Ph?cҐL[AQv-K үVnY:ҞN.H  "*B?{VX`v7ݳ=4`^&x)ڞRZ8-]l9-˴$ʃcyUXeȑ΀ \8T΂=lZ,Zz eS8-\qDmꓸhfZ^WE")t EVd 8#sth=h2yE{b-:1H) VLB1Rl&il_@i[Y.w@?k!64+αv$m֏tiq];Mfy͂i8u`]jGb'c_OpUG]LrӨꜺ^ImE f rT#'p GYo[zlPsC-?f;.g̎o_~?_?-HzW`J FIQX_uymƖ/h9 ޡ=OK Ro~0t>I#j Mk^y]U]Q^ b~,-@X|"*1 l ɷeএq&Q>i2߹$ɑSPj H5w%f8(0X@<8JqX};zs3is T$Ty1ub8L ",3ac^7o$x흯>_֤k\uaz&rr/eaOn{Sg4HjR i]:B-C,gXܜ93Of9=8h<;b2ec}enӝvQ5u=u+ }q2-Dab,b>$ @ȬC mYc1;/< [j-E.t*fo"qf*]ls?N%` y=~A ʜ>ULvrZx'Yi:Y篧nQ,eJ7.LqxQ,c` p^x@G/g!5n5ϦrJ9+ML.A.3J-9@\<[zݻ҉GQǡC|-URB1X*ut*# (\%+JPR)(j|`.VkHRs.k!(ѢPE6FȹV47+NZ8E͠ד6T' p~ > &RsOc[o}ʉwT.Iߝw 53:]:+Ș" }L n3|6skry9tJ;w?lRjF%eRq U:x }֙XKN{(7⼇&.lu}Kt~F700^G:5xg4<}0~d<>:Zk^^6^cn88ܰXi/9aY=j܏|<7M_XܹӾhf7Ohxc}<36Yw+כ`kdp*Dv3?@zl.B:cIf`\o};6x1 vwx.=2G?`4҈HG-kNq8JDAu9br$IQje8EY44Jv UV+!jS4U=P`RK[@z+pY$\EƑfl:!uǣOWH-9_N-u+sr.g{6s[%V2I dcsHzPS%0̙d )d dM%8y0%%`Hț\% @$,DBȹ_o@ǣѴl:pQwol)zGYg+y[e\mZ=+[sδO7ׅ'd1PdD:`m#9" ZjE0j63ON{`c hq1,X{\i{le^bы sj!eA௥+ (#t,ʥT3Z~h%FQS?s(zJ`.>(Rt@ FJflFViWxg l %t%Eunˊ74&]ۚ\M|wn`088Lq" M#hkY(!RE 99+2Cmo|˩yUUmVG3Jǫc'mI^1ǻZsƎabnjmXk> ػ8>LQ$Sc%D-oCq}3ZԶP1CΰE4{IeĹd8R-Bdة& SXs>ZEu|*}шcWkD}Јx;=E /h}J.ZDPD1+p,| ?Z푡Aۤ>@}cb_,ʨez&昜&KPK=iQI9kďgį:^tsڧXg3.U/zA/JMO6 !KwfCB)1cZ zIfܱ>pǜ'PaˬϚ\Mtȭ\6לݧQ̒O;$kL}/+/d݇?b$H*lnbufE%CgAHv%yI/[qV %TYk#IYJR:xP` jl k)gTD,I>c-0FIkx¾F02SVԺ^3rǣoz|Euq,57_<7w3Ê a}E> b!xltlr߯7r<}>І4sF j ])o'h)ӫnq|5B7lS]t)vZya ldӜgo=#w`s^Ѫu2e5WU Y5:Z{*Ԓ(ΞwR3j\ˡ#-29n69&l`v9UA+r3thK-ݘϭf7.](;UÜlטs#wx":ί: s/W(^ЪZe w ].6Nv.)\ft oe}*Zihm^mvpvAj"%I"L2Nc hD P4 aX{q$w&4y`w7~m Lڱsv3=[%q""'rG tw"Qd*Va96y`yW@I:m yɘ&P:!LDwi:@MkS}ro͝D`Bb>z4C"ereRJ-:ixj9N iOՑsOG @bHAic R:Pdt r,Lׅ;g_kL[' 91w)9Sr"ZjxAz3A#ӁXA,=CpǫU ˵^ (yCYC\Xx`,P< u"eGU2c"ؚUKCt x>4J(Wu3T& 0J9A`ALz1yiq O.rJX,֬\鬣%Ezp n$5/Utbe , #2µ$Y<)Ĥ4GAI.4010d 뙱1@©Iē\ h8b4:/URK]_vvIf )9LhƉ;gD',OοbO]numJzDW} ?mGj|{BO$]ġ#(J(emw.3b:ǎc:VSZ_&ȩ!)JY ;E,@i: W:m-\HMͦ<g7 |.&*K=ՔLND 
j47ser.⃨M8Q8B00W` rh"|"Q9MDIQHPzɆXLjMB!" +(rߴ1l|$?n'*wto#,Z ·e=S0yP@$R%xHrPQHV &80ntv1 G+8yPv'y !{lp$NQOIh$*G]GM8OWAWk9sq\28}y(jP02tR9EsNK,SQ;~Ψ="X-4хm@%uٍva^=H^ꂉg=_\+g*jSʳDIdJx-uEIȣI tl|6~VxrJI/Qq#|&]4~4fǞ9rPgS9ICF|C"zyDZ.?HhV#OgWF\懟l/.?^3_p ]LYc`m֨#j5}r5k.rtueb:)&˃(䲠\PAӶWv򑫬)fqmke~C mʻh\R0 CklKf;6̷f l6 J74(Zna^3lv#޺̾ 4lg/jU{N6o 3`?Ԍ⾸htD4\b}LKܛdQSe&i !k`Pz0wr:(G; 0~'ҎXHyQ}$A4H&79ٳ5J9r=í&Cn^@yKpY㑟#kA: {ϫzޟߧQfV )}ڌ~K^_!ٞ(P=鬺]뻜a?8}ϟ?7p.Oξ<y)᧟n>Z DždL(WO[ 2 !n}=R~2?\ gLg ʨ\E[qB|Mho8<-y,;R|w_74Sy]?FJ(_6㻫Ѥ٨mqSonf^7?P,,wꂫH lvi8//ӻ]Aq3~F<+jq/gmq9,?wc|}TgE2CSܔUdD6(K7F!ϛӻ*ر^ "I4" yJS|Ke`X8R&H"Q ibb 悷y$U`T"\MfpR9u1ł7)Pur;r3)r|~ЅͮX6ߡ-pKFlkjϬa얮Sx7UKq ȍ-"!2tzvnA0O GMwdn].Zwx1]ij>ݺnZG=79yf|MknȮ~Gi{R-jd)+Pbhz3߽zP7 5wUm0%n2%=M{KA7{$ߪ9|LAɝ'JA#-cDs0=^3 *|%e}>MJZL{rY3__}m+M˃׳o>WIe{A?~ݭ+`xvqiJF<*% PG' pfApep[9ɉs1*K}좲lhTW`#_b/p5Z^b?(97Bn=1D{_jva9a]rYړ\ .6"-@\ C ~ձj ϣx )AP\0F5B})tؐar*l:CKzϦ3Tl i NJk~2*kĩ+wvt`Wo]؜B'î2R ,x>P57ȮN]!aW\O]eh]e(`{JJ >+ 9+W*Cٷˁ]R$8!v] Rx0Bh *C9HWo]i`hH\m{1ি-]s<2JM9.j>hZL'GzcFj2ߎ{;oPP&_h286Am (p'υ)'yľ{41Fy4BK&R€ t]14ҧ D54L$̟oߟ1'q1WrkdilHeΣeSγvVag;(r"E%e$<܃)pLTP9u*0&NVpx_?WPX+!Ϟ[PVϺOZQ^z]V Ln޼-wClϯ]T r1R"+IM,Y!J)ƕ sLA2Xamr gx@ P ]i㹳jǘ2D*"A;12PJ(%%c%lSf82Ow<8&v]RlC.gLUWU_@ 8 B&J2#g@+,1[,&҆vB*MbXi# >GraGG01'd^`j@%v)F[x"4aHJē[(\)t+٨:/URK7cA0!ǰgLG BGk{V)e2h}Z_Gx'M+tҜ>"<4"Rs\ٖJX]}b2/6ʐE՞-OgZqIZojs4z@Ok7T ڽz]y꘏Whm]<ƃ<60@:yd*$GƁg*Ab>LQL:p"h͢4 J*.M6kJ׿ߏR@\R7~lfOSW|>/;?G^K>}m17_|t*v:U=wV"}U.7 u)?Hw0z֡t)TVrjfփq]R&uisӅUkBYڊnW t]]l@vBZ>ҺYv}XgiOg0*![-҇ \1!33DlY(LV3'C҂;,RlU!swcITؙ!SNrg%=qKaDkHh\(1>.Dud~r៎Q[2Ns4%}ыG*23VfEU>2 aU_k)oŵ|ME_DA0xƵM ?Gjo'Jxb/bibq&<ӂ_9U KqI9AEoZle41{.7zД܃~;Ȅ41 rxlct՘dV]Hp&TE2Rk.@0uz7Ye&";Y`d]YMH:w[1H>8 Kڶ=S'z5[Y{`n'HGh龑;Q9HƲkCVUQKԭJ+ϭ*k;Hke}$)!4dNZA%2Ę&t\ݩhHcvn.QۧGp Ohy2eT-(Uq(17QYf6cp{EVOar _10kJvbq3G7*5 $X 1 f$uݔKx6)<jB|+Br:b;MD |SkŽG#C1nvEɢSI{'0̢Π]NY,ώ>F]y6C؝ >Ƙ! Bv9K.k6Rgt(x%?֒crA9W9>VxN!T]2<?GRp B24%8Y}5?2XB+ &Խ@Q$lp͜vw!+Np痠W-:OlYD'6"zɓ#NqjrwWNW1Wj˜+6o]O!-E35"KeA^rVÅJM} np 4^so{r&h[}{ď%?lƏ]ŇN<-/=簣)VNׂR*7kER M^ %6뽯^lMW Hp9?tgێg^oگ2-k_LtΑ,iRtPNy|O|Y`>8:g~G 8V`NBrC6Y6-m8l$\J檏F<#lQ fP OK6{P6|6]N"u .ջyڼQy7Mg.>%z1~Z%L;hƞ\%~P {|;nF,MS>{uY/Zc :,&q Ir0֑e<| ?  k97dJp&|.tAyc9q͐Z/H&VD|q Ti6ԞOδvCz9w N8u k8]|pE(#/Nw \Iw3elXj2͗|HξĽu$ͥi"DyigygԥaH^a蕡_E}i5:a9"))0jL7UWo$|Jn9lf8XfJc!ҋ L3L6䘑:D_X2*PhSǧu^ J5؉TjIUO..i~:kcE0cy?5e y7e*__9fYW:E7/zBrțі?L6/$cˬ&s_~?HJ/qE'a$+wȖuCq6%o~k_K_|3҆6'$/:OHnЫ5'%o^7{ڷAe] Usn7ugF_|}ZmNtrkA3n::E_ iciqYiN䗗oz{cĢ]-f'$^oj:Qiw_Ψ˭̷AfEJnxIW]-}Xp2U瑞B(@1I2) >dӥ䔧@LKvīNm0׿ź/:nP 1Λ3: lMslzɝsG&c6/>G"o}>׸-Cl54^<>&}2^?}Y`e{exnTR@۫]Ct~L)2=ca u_Ѽ &&kxʈ6 =i&'N0{`9N29JڱL2tNB+"smo#p$&.DO8R QknM谻5 ]mS g]Qꯒ_˥f 鞐Ius)\f0DHf:T={RC#8E2s:I`!ȠWg߈;^.gymfߓgL<Α 4o+"K+4E#ˮRK'Vm>:PfTVs[nhYrϊSLI2-dZ (w~@Tm iPA1B:3«>l轲K.EDp=b ː""iEf)ỷC&(@sǀV}Hy;&ǖb瑍˥L`H2zmdK#EM!8AEgTƚRHb M, :;IuO'o"*X!tNq#)X\lnUCap8CוӡdVӓ]3sdϖN'cd(n*%<2|[ +uYëjԀ|AWgԢd,g 8P6Y8f2d!@c\3hJt@K/4 6eEb2H\+j{jlJ5_XM3vCX5_( >;/xMm½ê/?lqv:=9cBgmʔfI5J@@t6$LYξ*t2+z %؃$$ EqhT0*YSx%s$)*vZla1OBCմcW-+{m9y ަɁ$xb%ʋhm2 HZdR2Li%uUaB NMȐ+DKŒc!H)"hp5q._Wx(~Ue&8y[)TRiELQZqV!IU\C@(o ͒3fɠrB4Jt1R$-J)U,S=b5q{ďH:_\9iMe'8[;b\2[ e'*4I# '@X!Xf5OM~q/~Pa5p$=-D=e7Fn |O9 K'MُWz#j*ڞ ʲ m4.*dنluY+"D<]kSG+S6}OߨrmeڵT~YW[k!I8IH@ьOs.݇ȭOF٭Ofh?wnSkJ_ow젱D~ A%,K,'opϴM 42LY [`Y$_kDH!*'IB$5Oh)-%A-9dږN"D\l!j)øsO^*-dҺm8wuTxfZNn*FqMLȃRH`AB|c։h<m٩ ,vHl+$3ɭgm>? پ@03`Hg/'<F\4S9s' wxFnKA[&T~.?SGj@ah(nRH_ CoSWjutv~Bx{u@L˨K@Q7e@ ͒`एlHfwűU:[ W#wO.w'|=L{%3FIǘ 5вP~ kuκLb^ Lw!NƩ.݃EoXg:~M3c!W4}@5gekrָLK!jFKX;N<`lv3w!۱t/1z2)5bnnYeJEtm%J$Jʼ aWleް؄& `w^l2*5[igW(!k'QUc|m,I'tF=WܸyzBo(7yRE(SLNNcXQ + ,V$e >nw%!AHåRYH zgBȤbBŮk;#gã|5%OKy-+Ot'd|o0 $_8Hz9rL-FȘ.#_/0+`dR:P.WdbZ%96\cY@XM(IDr4,Q]߁w+<6 E. 
S۝ 2&Jъc,LP3fɻWB2!q{4ykdvH{aaqC D=g!,mdXrsj%,IB醂tsAH2!-$ Ж^(MIyFPܢ0 ,5.g9 U#?{)jO *65K\rV""ҴM9i<3~(7a9(1Iw12Ğ2_l{uzXF.Xj~o"uiuE2[%Zgfk4x1 ECΟqjzџSNV/GϠT9V+s]0#ل~nl.|w2}?283=ry][av)PD6dt\| ;Hgغ, #<ϵ,3ĘFG U,|4zr6f?brQ9z$׍vƈ% i.#e`I8GLHDaQ#k*z"}X^B=W_(|wW<śCߞyYh)kIPw&;0?xi&C.g=[ լ)g8 $Ni&SNgV~]E]c_!Ȉ^Ifh9)8|(6*X~Qq7:z0+}U4q^E%L6e6PJ x+AD ȕ42g,;G#sm[oK8h"9˓x02b9d yٌkRt:&%v:IlU pg<&MT.]v5LW0aș켆iRPKnC-iIh'b3Fdɸ+XFkxImڕߪ{yfӜ l RTdt]O;uA_?pxD\Y$ C4NXfDN"KM ڨ" ٨h2Dr+rIgm̷]T7VE]iޞ/r}3O;ߏrgCخk=:@G+3`Irn[Κ<"aKALr٠:};CoS4MF[sx-CVPJ|oɿQܿiTfn]@a2H.jd6XSR jOÖѷcڭV跢{/{%bd"ˤJzpCgM9?u\^r#'H2j\`OBPq+ B*Ծ 5`ՙ{c}l!jXՐTϗsIVLHACWaSohi5wd f" x؞*z۷7AT%RӅ`&rªt ]M*ީ/PMs+-\,aa4:>.1ozULzޅ`^<8yV0%ѻ$OtY^ vKËbljTdQZڹcGeV-+o^㛃 Y̟=~MMrK+J+߻+JT, ХJFcT,cdb&LpM݁eOѹѼR]^^EGpbX ]a^֒'^v)a-,Ӓ]~b}menPrQjd{j'>F.E#"*eƔONiT:rT!'['T`9RkS,7=lov  +iu9]yʘ/]|~L|tmZ|^rsrq攈re^0yBse,=_I8;o9lsV-] RW)PL=h!?UD Rb_`LT nY`BW}?]1I1-HfIձtFMdׁo;FcX9{`]T1,4e4VpUڸ:dHwr2Nm ]G!C39=3d"`ype_xV7x"#A68)D,4uJо&n?>-@-2:cmk48J.HyDti1@+>Fæ ۪מ _t4k86Z|ָW}IzMjrZ$7)o=]_}_ݨzċoB2t8Xmg=Z~R8j4mU;GP{A5MaZ]How7z(U]peH(ه;5 4pQϖnU mWCsz3߫^J}ObK$ -BRVlDu/rrPq[i7{Ջ6$g{oYUY;'j]ILzWW8b=>){['#Z`(wa[IaviE' 8y؇dR}F#HL&A[.)[UbW}* ke< MA4,DBAS=U>Eܓ2)FKl<{r^nR$0JZO¤Xf(s\]:^nE_i'cF6/, 29hN} gf׭cz>`cXҳK@zt=r]xpǗY/2EA}Xא@Z˄Ap*+d&Hibv3r|A8.Doui9*5 8t `լ.[cM-q(dhP"E=]@jXBVK5@p.F3/~\rlZ%"Z^X:Z|CDG/y$s6<cm@Hj:+d5tE#z%fE;wTmz莩k{84^F&ioJB_S*ou[_n+e߳1*VN}O|MP(V-.ꤻoLw+qK6_nS %Xyڐ]nrd%mب{DJ_]u/T<}J9.y-|!q}ހ}p'Xlc-77[V96ǹ[chNΏe`K}QԻwئXyctk/sٻןxo'd 'ŋYy794i5iqgg3M7{YEmaF˪AQh玦QKO+֩跋ĉB aw]Nϲ{>$[u%ݗ E+Aeiтx_nCH٢- ?6g ֻw Wg7H;+N&-}nsoP)N.p1YEX3@},3jv3޾ǂNM8cy-.;u@V\"D5aMe5գ绚o7O<7[[by0$G.F y*b?Ш貗 rDRS*@Y1F,qϏ,GXJZ69MLTIr,:#r Ĝ,3RJJP)oy@  k{kbM* W=VG-bENL9րp:ů{jjktШLc #%4ACW|tCeJK{2= fFinCiXrߍh02'iA/tlI4: l! )W dN}It0D1g?:v֯9:/ښ[wpˊvJ.1o-ZY(k?n麟wzzyٯ;[WQ.ΫH\BbmoڔYM׺NnkG=c5v;=ܬtCwnjn$V[:),xw[o(LT_4ݵzwz׭_б/S_9rs|g͝}<'~cM;hi&|dh2H@:?aݟ@'!4*i˲51`hi"O>D(ts;LbbVPosL;쿕Sl d Q}#tj.9#Ο.Ɣ@Lڐ "'m,8.CkL49Q^}mr;szIm6P*ZG#,lnW`}Z{5x<}Ε\W xJ0.Sq؝>o%CQ72(}-@+њ<8_f8C,JܔD划3L'*FlUgF|ESa3f>=s1#Bd*8%yüdYK'F*c$)k1Hj8 ܐdQg+.&^DQ2RKk'IXPPn; \MOMvG\= ^@C[^T骨}E-j g9Nz) HΡ[鲰gD91WEFC$`S\+]}'P"k3Tmd&vdUaa5 jX(.xMrKwsЦywMA#9cB P2Ĥ>\L ZH)I#Q)̊8{> & () DQ K̒Lʡr&G$ls4ktV\cAjڱ/jʨ-ƌ %<('"@2 $ZѓϖddYJ(a| VM̐+D Kb1q$)"9p5qa/r 0 "Vӏ}QUFD5  3TRrF>b+$%cWeη6sҟF9 jP7JYΘE1Aq p@\p} mIrpE2"Vg;"~:Gzԁpqȉ9jZ/.ʸ\pqy2h2sϸ# *4I#^NBޥd\<. 
Vӎ}*A/bU%m )Q:яdljH*n8˾ƆF,4Q"b?pTu>Z0OP 33Ԛ /ZALB zeC.b 3HW:Yݓ PcLkLƢ9*9$'fcjDzD1׶R旋Lً-^ĆUV,+䰸P~ڍ"|yGeR\vsEe׼$_Bʀ$'mp%S+R[5Vlj2> -N3afd6G&;oYeL 2&?/Vs鈸yV* )hȭ*DV•epLEV1-"X%NVjhg] lQ| v17J6Z/QJz\ G!E"8@MZp)iyr3H"+,nRNJDJDM+@Qx5OZq1R:#hx%A h`2Ϻ &"A pqtbm+ُ6\( 2M3h$T@<r"Jk w>ӄSAC vkl3lI&Gk|-5wdbf|2 ,;Srg@r&Ƴ$gK2vT4d_p}2F?^CMɲG,KFDġ,` 6nJm+[]"N@w78cݑ7߾KJoHW?_g a:(w?č'.O8?8jc4'\ +V2$IV>e0vԷYF}g洋fyMA/35](,oHзGо퀒m9w7aM?toIF3: 6hwNG?vwQi6o;=W׳ɼSèx=2ވޝoN86>̺r6H<,2"l?k ^k(MM&4RvX*!^| ֛ [zi+_j9|6Men=^u"*LPPD*xmRI(őKScc%å4={_HItPZ*V%V!2d"*i!Ї:]hz^jEe6X&E9*T +uJGgs @O_@GpVHqp-%fx0[ƺ dy}֨\DsԨHۓ,@ѴChG緁g&/-6!_t|u}Z('p I:S&;b޵a3S-*$BOgӭY>⋗0 R{ev׽8[oǶtP9Gӟn/_0Vʖ_ժff`繲E?(l`Ŵoݶٽre=ꬓZ]W퐎R|~Hj4#_1Kx$?R2~nuA{ۋ~y?}o/0Q?xKXu Cg%5߷ 55fu5 ͚UiZWhW:%#o%v|m-ٛ__ȥ~f^eu#]5aWM+'jr7enOwPa$|aBp͈sj< #-%[{%Svʏ|D 1`r#Pm]|4K pRIqټC,{jvȂF;Jh0I8pv`eC8"B, ;\TBm2LB6i{N'G%u܏%XJjaMl?AΕ-ur/DZ&* \rmP&(`}0i.q۪ dN7ڤRmݳKyJD_;8.bDc`YXq¦CRxf7NCFF: l0 R9HysQ Ű8ʻjXy] /m/HŪiM!*LOu\ڒ$C.ѡGǖ))贘tnK 럼 (UpsuΔù10xKvT]vw,.ӫL岋9;>6FGVކ,,/C?siak`' \>~O}SR1aka}[ϫVb⹪[ɍ=W^~Ćɿ;) |zj7͙\Kp"*"N|Xao~GH230)K?oak`x!rF9*gRbrAs2uW@k(ņ0*jgF%5 A9J"zq0@&.p3"5SDce:S"84L%d0l\݇iy0 lpL7IBW:I_rp/NyhE{_=1݈|?amHMT R,d;Վd) y l_kBUZD~ǫSLο)6+ėyd rX` >Km#,[|56qp&$װVjSʜ!Crϥ Cpew8؎\&6R.ZAc+U`ޜsTR0οzMV@")6gF\%bw1nUEAI8jW)l7U3qvxg #vcPڙট,Nrzϒ_L?i/SQsok \`0{3gBօ1zlx[y`G1dv E^шȽx#,ǐ Ω1D؉ht2P/R Q&B `.E|aK1`l FcD2YɈhA #(H8HX>Ck\(8?uSYuX-Ӕk؞Z}!V̴s1QiVHeV`7ʝ3䅄՜h6cH-CkKvhNƛ+s9QI<&ȁQGR➥دV۷{]2W L5i̥&6R3/5JJK#Ԕ$s sJsЪ7W V:Bs`0KX^wiFI;h& ,pd~psD`1;dLW? >٦`z}_ V@bb0tuI!n3?fOkkz2)3ՃM/^h0v/EoXjr˩c4"$Gaӹtn3 !:["N " pp8&:K(h? vR+,K1n\̛b]\loJ[sl̕8\XPgs\盀=RXxZ\\m:X`'ۚ+LpsUKXSUBKա\DJ9(@-;ME9/gyɯ);3No޿;1ƓG=f&gc ΰ8 ӎĊ ,76! +3+n=:[⭕>B+Md4\`icUKDSUB١R\b Ԥ#7\%1{V20uZ1_l+~xtiwm b!ŘO$#82>10 f1grap9+kT.yNqXt6uZ&TcD8UY$W\lgRf4p!V$kXׯuE-+ks6%RA1LyJ;nTL6Y6IU=>9}ĪLIaom\:xXυ۔8BO3Bve֓p TyWA:f d$;QvS.%+C*kLkvoJt  }4"Q2pvކY҂?: jsk(L+8D[M/\5\glS8wwYgGI)~;MvW֎ SɎsF;50y6tnmNdgΌR&:ݔ4eK:~}?l5Hd܋Fb|Scc7jU^QWcL(q5 5q~1wS2V=W#槌s#BYUҋUb!]+#*R)@9*뙟 MVo]|c_DXqjq3|X7?Sn5;:o+\&YVj)qp9yK@WGĽ1@! >0q)kZ)^H9{Y=BY\U dXҠ哗6&$L7)}T>%JE[ϧ#|%jA\s҃+O(iS(g'e$Op)\tӦIŃY^J',-3nYtĴaR&E`ƤrHhw'c{RP6?1B8A g_].nL:Äv׉'7W %m=s+N %ڿZ07Z.slzhŞnC)Matk6zS$3\%5\puSUBiۛR8eD.}XdXq^T s.M4 l=\4L'Oh+3*~f:Ժ5Gh)s7, 9O~w1 GI,I^ޘ)|jC+_̪e -E(FrX Ⱦ^C/NǢJ\Uzpr,!qͥ>E%bY%((}t\(^}ϓ[ sƓa{~ 0ӛ{7s}zgv?'hUOM l^uaz$j낃(,֫M#2(]oHW>B`&}Ipg,iENf(Kd儻D!_>D$)(#S\i.h$jJcB>,#~dO&Î}#o6U ܺ-?M~_5Tl#pÖVl9IX>[bۂC|xs̺?$=j.,OQl/vYS6l;uB1 , ^naÛ??|haRv}Y\9L!2 ,%& `MW 0OpL:2\IJEN6]ۼe_&@ЌG֤ vsf{c1īK2oc;jo۰Sȶr6; CsR zIGr8 Q }VQ<3BX,A;6^c 1,Y9%$[%"VKp!n8u] N!\Yy$z}2E4^JJA#-,x,{Lc/Nz齔gSϒAЉ`FR?2D WqlՅY¯%evW"%a-G6F1A6TR9vo륬 ֊*sove}_{N݂t޼(7v$+w-^aD;mb! ˍVYv Ki`{@|?Via{tz-݅V) }%+ǯ_+,.€VA[dKJPq_L2ri8*N埧b+ы%;0Y 8'*5iBW,񥖌&%vFy"Mj 11r#}ټ42yU_bppFXtm0?2~KpM22xMz7d$LkF8*g3 t>N$ޔbʼOmXbxGZ@m]/9c̵{+ZAc+XxTpT dQC=t")PRNq,zQ;[B7ڂʸ)a"LN/h(v~X375Ƴ6ص;"@N?ui_-<ɥZύ'3/m&X L^-Uf;h44JʻSc-]Į6ALW_ A(9?М'#؃=a+*8dionbrVAZ>dq)'p7f u:vCS"Cz ^ wE%H+} *ۊp[1TWmbjU\)+:ңXK*4C\*Yxi*x;O`iB ³W_\OomumEv0K74{oF546F8{X)f\!|5pZ8kpYA=z%5#Ȏ*$$di8wn! w=͘3!STBƘ\qRr.CexԌex/ݻ k {D4:zZ)(!P">X%\0aS!aBǂQG"`"0-a$EV 1]eXJƥ1ϮGCؿ7˗~}Ym}I8=xb/@JvN3?f8* `RY!/$R:F",Q6h*T ,f\F1x3]v XY {0]_vՠ'UlZm[ge^S-W\@FL*3ҎQsQQD:r9u87ݧqiSRFvFč%6F00*Vf8F$cZQ)Ju(j9$$V8m0פLc1>"l? 
v~ZtӒ)E 0o#ݡ0o` }BCа7ၜ uKe0l2 Ku U<f$lyfO66'ÜQf YcIb4-L)J?WFo4%"(0S@R]]S)*qI| 9*:L9DŽRn&x92J#"zk[R.k%GƮ{;cgC:Bp8yv/}(H/" $Γd~4CW( HTD +bVoS^o@•qB8{ey/~ }*K`8ʁY oVAI$wV_.:ySM9T~l-^~_]/e 4c6 Cv-e}.'\ڔA ѡEF UW&P&q:)6ѢʫnlyqRY 2H̏f^kvHP[|_h[լ%-_p[R}87>,4R7T|% >Ee.7<]1sXO˿gZ@.O/gܕl{rz_:h IWs02UX?mb*JP qs&Mt2H"hn\#r85m:?%ђǚV|ّ"EOzn*:re}U1C֨0|.{ذ 1d$ ;!㢹b >L Ež{(+j\3nGYz>$zR>;'X b ȴmo!23O%&4mtѬ6tJ0˻dQc;| m8ht樕J:1&II#^9GvXPّEio+/n9 lNF ҋ*?d3_cG_Y~/uh6\os0*pZ5<1Kn2hRVk~^VC oE1y)&*/H$?|Q׭:,H)h&Zm{HWᤧEAoҽ^Ld -Ne3~_I N6?(]ej_j;sLJưwndwMU.v֔-w|:AP%t;HhuAބ?ƨwnzuCQj7ս5}s3| z޴zlsN%çwSW4ڧ|uGr'GXj2,ш4E-OdS|SRN M]qR<*+]yygNVpK(@vJª٭FeSutnckc e & ¼&BP5;LjnDh:vF.x&y|tzS1Yvބy!FE_iz>l>{ ΘG4rӏrޕq$STF^)@Xxeژ``y0/#yD\QMMZY%6ϤؒJHvUueTV_dqG[9rwe'N)hm,&! 'XBoTI)HqI 6uP#!2֪2q$ ٦H1}aKO؍z'tc G5v&;Q+~_2\XNl/!XoNn}x*vFv(|3g#2MqJ"cؔdNeq6U߲"{9`֪:FK-H!w*@5N\B(%U꽑ލ V`MP jjM([sŗA O(|B!u?Փ<ڒ >lb/^h6;PcJI"/9TZő44OrtƔ'UA\a +x#fAtc;eAkqr?pabyfZ{9t~(a}`t^" ( )[g1 B 4?UpM5 G 6Dg8FL5^uL(BF`yF]A Cl*T`Q ~bZ)- /%@1Q jsēV=|+"CWqxg|ɜo_Zme'Fjo#u8NYG;:*Xv\ h)~u@d yJ CZ"H'n@7gFFdFjl&&o!`bH6)QtPZՋ8 ΥfB૭ZʂRSBz+S{FP4pNC!+bb{>ybOYЯSbOĞk儭c1AGY'F2Fkyhxw;%kk. B]eK.`` Cr) U;QdP}C%5cƺK:G<t>9dDm8Kt(Sb1-,2zj ٠;_w-6NNfJ:+O.^U]9#?p/""oGu ?|{?jK \yeL['lS9o;O-ݓ?c#90"9`,+iO]&ǗG]69$L˟W BΝ1M7<׳q G:[٪9ШrW^l;GR>f/lYD`l7k+13bipIٻ_E/̎A=ί)*/;Lqɜ WY{K:qICbK H '+icd7F=o#UPq)"k.AW%.ȃtxdrzEѸׇLe3t>%!,Q.rӷĕ*st^xOX(Z/10ZN N8}fŭ*v&J1x>H1T#Uʚ*G@eo,U,ȹ^/Y_u]RɹVWUܧXV0\WL a洂H:8l CƊK{1wLn.!A5̙|ʯMs(oCy'Z;+ATm]k$X:L{Qٛp囜h&l`5U4:LΛM(xM)q2PAHVgg&LYAx2ɥW3dIqȂo% Sȹ^Py>,僋46!ܵ%^ؐxr*U֮ZPd^A E% rX"AÚj)$6\jF/7-: .ҪW(RC}Fl *JfFV quqgnPä TkOuxԐ񚞪˓7쨳ǻ&O!]~;h!\b>Xmd 3fc@&,o@jEZŢ`tSc6y[P<^Hhm5xHӻzsNrݍ;Mgm&=!+R4O@AgdL⩉0**AjiOrM*ś qtՇy9KN!Xa+r18 yr R]QYj{O9ѩoUv 7Ÿ)U#Nq҈WqzPA(@G+1@A6=Jdٳ4L9'-`ss;,:^\ZnZg7.^x6Ef1 mM5k;cn` JI/>^}؍;>c?m/oUApS#E?D?Qd4-`T5SZQ!%CvmH[L>/l\>mP>M8ur,~/ZsA(Zv 1@,W4Ss3' EG筯!1ԨL&=d bh7rn$:tbҁFntG ms%M\݌:KUE :!|"3RR3Z*j1Za[\m$$wIΞbUȪu4wTB:T.>G4Ir`WvYplP-&ȶwTZڲe|κ:*1# ylT u#r6_}Mib(vlLQc%#Ps -85"1Ǭu[%cʓ_?V7ӏPQ,آZinR-BǩGCVh)~TlV7&?i>f>d[GEd76,.[ T*T7̰NNXѻM6?z|3ʑwQ),lQphK?t w  t(5'i2=WD_7&GU5oJ\!$ JU AA,!9a *z~q6*U:Bg_"Ѣ%xɰywDCY$L&lVqX-f+%WM4Pt-Y>)}r>x.r7fxئ< 䳚"4' Uq:ӘcUq9 X`9 \NEJBq"DfZcjklSHǾv۶hUzǚw#熥N|cxz_߆5[-e/'ujC[Pɸˆ-zYu@ ˁ!-Z1 uU>$_Fљ-ƱA;R.BqFE"8c7IZ}܍ 8206yٲ9<ۛބ5+ЂVR5X;Tg5;텲J !!Z1;S<`})\`NT hn^g{u2;ۏ,~\{?Qϒէٳo=={hРVBnb򍮒!WpKG'vi\^Bs,{g2|~Z~<g%_rn,քӹ=P$6plb'O&wϭRF v$.aa\;Yfy)Xx?(6͒>&zq:+׎գrݨkȜN%JFN/h!ڊS؂2#[]uS/^CȥKp81L޵q\ٿ2 .VM֋ݵbKVQOafHƒzC8ݚW{svSژ?ϦTel4:[I|5z`6E^{pU'8|6`}/1rwgy Wglׯ}Z+,tf]DhiT-er ,'rp/ډ:8PC3X0d2!CޞY8?y,/8#(ǖlwSC/==x~u3٢gGp|:#c0Wٗ]f/G<{qpoG6z1_ԁy6a=C]&ս+S_@UE+l骢$9ҕeBMw<9 l>ƿ.UsD1!φP3☿uf^=ˏqO'<t Ӊՙ s·T]DDi٠^ݩVnY/7\2 :y1;_9 +1Wo_8I\%h(Hda|9 o1F,5(j«y."gtğepEU⯢|{(Lihv7t˾UEKtUQah+fz2vt`%wK WUE]骢X283ѕ#F7ter1RF5wf>UxʼvNr`4bXI=O7/F҄ %_6&'Ne *m"z.*ϛ̩4k )BI퓬 ]U*ZK]reā\UCW+ ]Uu*Jވ#]Y&Ϯ|NиGK]Uշ~P ·R'rZwIu'oJeC|鼜[=l;/-hNǏ|?d+5ŗ9;C 06ܫoOf||t5IɽL%rR%XY"uFz8jP*&XBX!Crg/`k&8OE_bt HOs8^4tX[/FHS" K<՗6d.#2 + Ȋ'G`Vjc8.m ye%\l~h:sg [xdeNoJko:?G$90/?넿^O_j٫uVo9Zܪ#N4PjW~rGV /y݋V4G5}[B7 Qw-dzrz"aIsܟ꼶HOE癥YP+EY2SLϚRwQ{6tޭtv<~5]Ⱦym~_]l -O`9-*^#:`42x4Wџonm}>F8ק,q]򱺅a!n*/7pn;[v|fz96Ӛ[^}VmyH)񧓛, kt񀒭NZkg`uYҭ!|+=4Yo>pm7.Fڌ5\qaM"˲ڡ3n١6:b#eM?5g*qöv4_Hg#~!ڤa#4$"bHl\[B:d! 
5Zm=ב{I8e%MO>y9$p$Cͽ# 歯hՀƓ<_pk/<2xQ%&ÂwRSAqϠE82BF<)g+zT6D͒% G"ĤBCH"&Z&NŰ}|lǰ+!QI^,g’&Z*,rsdHp(sj$g %q_w; BE9I:r]ׄd/RG8g(*2TtRِSό'ZS~/}BJQQY YTm#CEG"E+1xϴIFcOpfr$6oYӚ*d]6T10IB›Dw0T`*ֆah*%}*Ś]1F RZƝq^"6OVg嘣G˗>UQET5-Qsv^.xm+'D2>kΛk!ګ"7,BZ$}.F)c$z`mVh +/֨'@\+&FpΎs>K6;Aߣ@ϗ)uhqj`bD-Xd/ "AJuB9Å@$5[Q(-dmH33 i5/H\ ={ & /bڧ` `-/!D :2x)x1)# dyxpDH+'ׁ#06 - )c$\|`fZCCN˼^qX%1iJ<T|USmJfA2Dc![IcX[eдސI!Q3-e2 CPˍւe (mg5aJ%V@%hFL*EZI15[N-`*$*ੜX>+N"piBFKk٠~tE#qq YˑAQH>3yJS25I&Hbc3 `,*z]rRqx"fH57]6ǐ?31$imUĐo,rLh PD* y@6 t-XYjC302?%( 02&HѠԙ"KI,( V+\C@G] 2roJ6"MEs%BA٪ BpC[mYbH :^K?Jbc]K]Jʺj J"H9&K@y!X-ՈLh_ 䵑uk% b`=dD0,x"?f؜:iz!<{AuP< )pLuT[(Hu*KZL5tp1 )v1 /*X~5!=ޙ5$w ў&> ΋w:% &(iI TsH9@hTgבGUs p`$bB ,HXeT. U &Lt~XXe1FꁶSMm(\VǃޭLax\ `q:&,:;' w >ONuYQB`D9%$I0w2f1{/~_gZwv2u1qWCj ,׶X=p# =%!z= \Z9V=y ԗU FYuE\.B{ a1=Z &oC@DhGcw̘qNp\owHX՜6>0#kFVLZnkT1!hJm`.6SgT%g&Ybm4\DAp2C "RFdCQ0",0Ӂy &XN$Jޚ5't9",o;,4&0QIo^5BR\ ^"µlF%Lzt׌I'y*C0t.Y7u0qXXqƜ[/ tZN*߫iżluiIЭxN`pԵEWnj!f>z`2o1mtn> =EAہaf1xj]Э!v碦fXsC ~y@BK6Fm娋V.e B`* GXi 'O r5p=a5q^ u&ih +Fp "3)oaGu6ag +3Xة -,J1<*G`'UV϶USXh t.cG\E Yq2"ƀ).Fʄ F5ՃW gFxqQڙI<(gҧD21USEc?A],Y#9{P*mAt Be_j;J*ֈ²6)?˙C-+Z 5 Ѓ 4§9|0G@ɌS:(Dp,[J]1J@ 8󫎗x@(\D ksڌ:gC1IXU]| s٥`#Pd+}p""ðB%6 踜V D .-FNeg,4BW Ty]rqbq#9 pSGeSnq,WyI8t.)cc]m }f7.'=,}Q3[[jMtXGWx9 /ҁv̱g_頽Tɰ&Bw5b>ED^D]ӏdل$^J2D`6~jua9Oٽs~*?|?@=v*YƇc42)c{/; 5qL:D?,(gICdJrcl| &Z/?#nXqW/6\{ky^.\ͽ_,h<y 8Ɲqanő۾va"}޷g'YA—*r\9*s$ ?O:DN:DN:DN:DN:DN:DN:DN:DN:DN:DN:DN:믗\pܼq]¼tV T}Ɗ2& enuN:yB(pG ƒy=7 w(+>F1rz"CTyt6wSpKN-ݽ-Sus4Lv$->ro?b`$4חe>u M*l2mtrݛЯ|&J']g-n~w pK^]G|~"[wo'QL)AO .8TNP9ATNP9ATNP9ATNP9ATNP9ATNP9ATNP9ATNP9ATNP9ATNP9ATNP9ATjr L ʾK>wIկӨ]~p#F ۖu00~`Z:l(DTD.⭉>>_._~ol~"Z281\sM51\sM51\sM51\sM51\sM51\sM51\sM51\sM51\sM51\sM51\sM51\vjdw2ڊ?# 3iK^“O^Żz݁+7VD[C?<3]./,s#b򉵴XqVQR*kcbgzk-XHxW394ysSoùO)Րc@@TbuJLjn8uIx\Z4iQ4OsCyFqBRAkop+<| oښncemT+jK U,Ƙdxu$Z:-zݵXc8K{9EcGvx=_Cv[.҇rq/Limh"G7U˔VY +\ᶹ\\ٗURǒ$HNTmj.X!rYb*^7õjkە^OP5\7F^-..,>!t |?uE3]%z ͸P n[xyz̽we'绖e3WZ/LPo_~i&+Zy&!v/Ĝ_%MKv/+fwM6hg fw]gYomiK&15*-+ M>..?iY&uYpK>^vp搷OZ> [x= /+sx}٩V"xہ٢ˑjp_k;W@kܰ8[F]oI,L. ݚʝ$yѧ8;Se47#I]̦ IuLn Oet}=:Ͱ\o^ҼmޞԼ9-hn3!Nk@|kCf]`QPk[9Fk7jG!?fwzO'$Y5ę|{1T9Oz9{x+'+myX5]+|d%b,zr8"z_r[em^t3..,RO8wyzyW=|],r5Mܲo#7o7q3ޮobmn66[;?nxVQ. %ׄd]ګLDUTRJ ʋDɹV.mԴ?ʩ򙥏/}t^YoͶhٟNxqƍ;8U'{q;=ͧW?>sLRYBԩp"ӬdjvHBhbSz^3)Q^7jvbGvb;<-XE8q޽U~Urkݫy(!< |@vzTf iW?:#ȲV8 ۮU\409eiKA2^] e<`Cdp"lx E087JsS^3CnD%Skm#G_E?{l;$d\vb1|ھȒG&qݯ-)c=,Sc9qbu]_N X`WE5FNVDٱLǔ!/]On<&7M_ltք n["] 1C[Y n ~u{y%ſCFҺB]s +)ID "۴);=m>]/e7-W|rŕWxG ~v%optrgϵCW=7"\Kaա|ŻYtk$ %.WI4҃k,CՁs>'rW)zpGEip׳te Qۿ! /^:8sMnlt˺558Z=H82^h^+ìrLu AGo3߯I9;9Ue۟]\MwaȅA ̓:T=y@MxˁG oddx)c>wzWp@_Rx̛r{wP/lEz;'w}Bd&k<0J’ua.xƔ7,k0U  ,DZ8 3eRI2dV{C!<(0gPQ=0x)]'//KLy8zCy*'Ս0oa~z/'sʸQ!G?Yw11b9dz#.X*=t$7=CjQKEBdO"qU (YVcFk:ˬTDZ[C? Q rRjlG2wW|#m|yeѷ9@Mf}ѵlF>O},աOM._N6W? 
P-kT!+97Q*IPGH&UuTG^y>=Q= gp^)AsY/ ,29Aˆ9---X@- }L07T hd={N_U;["n]ۗࣇO1jc%<5WŨ1KT9YrF9$.s#W6Y(QLG CU[%cSQ/n_R瑞CKZ&P!:8t'6ʘHu!i 5i6n|omן`R0Zާp\@|8O5W c)'o"{{ݻhafG/9ae pe 1&*GtϓRruN%R\ .XeKpl5 ;X=Y@ͅsyw>_ԸbF0\;raxFMS˨ F%"HByRZ92"ilCz .q&dOqr^*{~&q"G  Z%WęIR顨yR[$U;\[K IVgD&XW 1*Kp5rK8+c-KA_{շT+OZWy ҳ7Y<-yA^#Ɠv6a7%pF^?_ )_yo,R\Gw>yc~P:&2up6I#W︾ORs?JV!䢥L3 e9R֐"gbw,xչl/86N36^ɵ'SortE^.i:9L z5Om Z̔my,^蠋wgѴS7aЩ٤>({:ny羦'xy= \+`8oxVCOpc7lv3m_WNހתʹĆ[V0d F>m_Ok/Zǿd⚑X'Hѓk^gp9s2#I1X+1;k֨c&gw}W}|6fqIo@4s ` :.$˽7&y YhAɪ$C+՞klCR_򿘜<$IHPc$ץ"3V#gvx{3!/:MN8Jj|;{$97RUXU} ($g9kN811B8k w0eL O63jFM1& ʑ0!CP0 c^DY:,ӝ48ΕtTmX횱V qƞp>v.ׅՅfO] 2^30;㸜47y_ ËWē*d#G0HE#hYʶry,i`@*DQ1h鵌(l,/0C4I9$.Evǣb֮jm=88 x" z S#%1Bj^$ ]6,F % $UaV]FyEȐK$$g&HǬ" T'tSX]N}i8(cшc_(+kDk^#nxCYL&/+琒 ,6ߘ̕~"AI}p4J Y#Rg|ȤɄ@ ҳFF -jOzqzLj\^ԕb7q|\ sǸ%d9;IGp Ԓ;Ajܱ>pϜ6W5yS\Dښs^?d5FOȲk6)M%6A"BG}A=WsD實eƩ368OQ!eM(Dɔ,d̕'51H(%O!jkx(WKx)E !щt2Ve ٗv[ 2G4f#^T.FAc2ɢ OrK8ƌhL@[ 9$lzɭ&ݩ_r='KM䁕qE9ňJjYÁ'(kRV3GP ͉7$#Y [hDVF2MJhY,gQZY˨MJeB! . t":k8">m sP21 '[b@z)~-?W;'j&ha,PܤH[%Ŵ"}ZՠV~R'=IpGfb BE 2X{S llNyVm2L>^qTQt=:mj䉒MPj4ՓMyX%g+M^VJr5kJ3$j4E$E7-^r2mL-,Q'זdE*(V! ;r`$!H;PI$Ų-scK6=늇md^y-fdt( $_8Hy@ )l0x6bB,<U"G!ֺV/&ѷ^ð[kן:g"j(-&E" ] N!ayb>ĚB iÄi,^/dcjb8?k''dD)+^FIi etwiހ")KØY|'taEym[qYנ",OZ8Ive(re-*A-x5eUd[|d=FG5Ϧ~sL ܂8tǓٜ௛[D XͧWG]F:%rc2Xt̛*]2_41r/o@ƣW腫\JX"1݉ Eߝt^xQKrbe*ÝLGp)L_ٟU]-o}jD݋VUCWLj͖ J=]}5tOz<8W {\xC+_i&`?eu]aOWv=Z 1*p ](NW4=]"]Y QzGHnҔeFn]^l0^q$-L'п/?}3NC#=uY1^1p~3ʧ-!. MܗU4]P* ҴPXTb:CWL\*v*(:;DWXt \]y骐9S+A!"w \aBWj}0XPS 'IW$R ;CW.v ZeNWЕZ.tG]3vBtUPr骬,Wާy)Zh ߘ5ן<;}^t"B Uu1Wu99LK-<ҪETJ[G0؛l&{@WF:V%*s}@u2Gvi#v'-p3Y*vQXPjًHny "Wu&-hM/%n,t% L׮n~>w. t%{:nl U+EW誠Ututxں5Sv]s`wzz\4;4]õ]eiia]Rv \T]zC{:EBk5"+p ]Ҷ Jm{:ARAf;*hn;]({:AҀv)$BU:3P> 骠+S&D}h٫Vw-tƋ6~ed@K?в1KFWΔZ]J;I,:khI7G@kt}K˭t2i¦wrpѻo;ɹEz?mBgLMJ+}6Aj(\\Jk^lgXӉOj]\gB_-?~^;=?Nbm`?O~*veEŧF?Rl` WKv׳:Ȧ~"GN`/6]`oח a58?wS:$m *x+W&'`r2h.%)iR ֗׵\n eJf| ?:GƂd:agVy(+L7o[z/֗my|5>uLxYd'%l@k]}N^ /4DecAԚQ8H^C q !KhԖh!bþ}e==u"!T>d E7ItA)(t ՊBw)T:4dX#i#)c.kBɅ-OevG](0}M>")HRBDV;ڐN3I{I }2V`B X|l&M{.'?a1eH[)8jëY^m{oݷO B7w'?0bLnpF`t`" n4*ҏ_N&F&Jr4ր˵oL*K\ ~ ]WB\.FqN4\`V.46FN8̟7i] >Q{i@ݹXE'Q˖9|pFCja|Pg(;9.Y,Sq .zD&XµҁG|tQ@iLY½vOȣ=zb鉩icu2~Kܖp1–8^M49&-Olz> yzWlӂMd! 
V`Cci>Uљd,1L: U7q7Lk2yj\H>q@ 湕v@8zq>_//vbY6+eŶНdwut|g .LJ'*4oY(|a䳨rV~sqHN_xix ;(qRxۢޞbyGmrHNGq<ƭPq;wZ4O/*wPb1_1}}EmQy;p*|;bEslSEPFl껴ְM][ɫX c"oxG:q./Z$RRq;Qՙ1^C+n}!f{НC΄H&eS>bJ2Ҫ _S|5BédBNlF\Ln W6!>:[KcǏI\ 2qY,w}//Sjd,zzb1V04Mi&&-j !BTtp^{^ oFb#)3t J#ceQ' MdJ$BJ$u%92r^'>PJý&ƥu"v)=_>1z'0UM hlF9S`b2(c$TcLX,2Ma+nQ{}9QS JSH-T@j!LT8> LE/) 9i{ pa PVa 0BAК9 ܚ-f Қ*߭>|>ra._^ s?dtnS}3OVyPYMG72XֵKT{KSK//qSx\0UEVNEڜ;#&XTHv%&抴\hI*)x^(1h_ X9wZ{kչ^+&In~-L)q8wϹ6oUcIRlT`bđIeV{Ht5PSX4N= 7R_J_Z__[=iw}NbW~veVbNX=/tϋ' ~~D_N/Vtbw*,19K*,M׀%F"(#Z:Y.N/db0=//dϋa'{^e 7ժ_0Cc}t۱mp:chxjVwY{~Sv5;7a{ $ ˻,~7֝〽&qSZ [kW6MX\m|<{hG:w%{U;_ 5Vuqg\{y\vveF֊(0'.h[zfYH"J[ژMG B0G띹$Gh~ NA\TN8Z!$+ҵB!/ _;GkCp+O[yHZ뿠Q+ B1M^-M 8 p{#x4<~ЀS+-w;n#c/mWcFy\(6_7NaD#*aT.&R}(i%NVu )y= ?jaGV-zF] Q+8>RB|˘n5=GV*ڻ@ܑ.z .B$j4՞JFޕD٘cX)5FIp)l4,2)@Ujqtx0 GGfOe>rnc#GHw0+AXK&9oI뀥H{:`/ sdeRLKN4k򰥈9Pq"^*'_IZp^Gg T1\r}}dC⦵ray>>rLiR|*MK6DKq}ѱL-QJ"RP5G\<{গ<0Vġ[Že2{o蚄W$%/]Ic6ߑ#+NUeV;Vy^U:_[LY1 {Z Z* R[z;Vdִ\ZPNk30^+ #>)]##}dn#Ǐ4n¨o9eY`c8؏hG|v;XLmQV6l~o`z oG?遼 ̖7)bMා XWOgo@]Gޭv}Fn'6L^1q-?.Gv(~ I0,`!O_Pxv(ѕY8T+ #!'.>2uз X7Zn>X9ͩbmݼ'ZֺQ/rczucN A2G ےȑj̺yOu !!'.^2u7WnvúaPNtft[ Z7ωnr"#Sn b8wĠԡgݚtKv~;n$EK0FMJMNL}1(#:}Tn r-6UhYBBN\DdڶnrmL=1({Qig^=)T4yO4zuFٖuZ.X7_ ʈoU[u@9=Ѳ-zɔeen:ҹ[{)Sn-}@BN\D}dcmAѳJ[yB)ڜ;Ҿ[ 9q)-;Wak5y.1~^=@xsGe|}Jܗ~!_8n1#7JuGͥr5{q'C~ϐBR{cL 2䂄ݻS&!o &P{Wc!cbA5AŘ9V1!T4F1s"c D0ѻ3'!<Ęj=1 Cy1>7қ3e VCb'cŘdj11栚 _!c {S3# 1!TS]71fbC9&pcf!<Ęjd1f%bC9&hc` c `ܿ3lcbA5[3b11氚%՛3Wr<Ęj:vfY`$c c!<Ęj;6cB s`M\/,dCy1ul6czbC9&`BݺsH9lj`*2&YZQ'ffikl>_~j06է7?O'ּ?L.L~>].R38X)L+9i,U"jxoB8NA Ɵ(:Xeԙm쭝4d i۝Q z/t2q(>ht`,s0u` Z3 H(FMQ<7wo&Q^d%$/m:^N'.36*XŵټO?I;?^5Nk+ 4~6{ޟʷgo 1V徨|_:V^,q^h,~c"+'˻l~w]ۚt2/F-"1yi,eY۳ ]xNL 뾏7JNkV2w_}֗&-\)"l1q( J$.HSgJ{9_!e0;.*V c1^g@SER,JXT%6jX"2<^FEAaD ,1ϽF`_t'o .H#Xm%ɝAYyBOܾsQ"ߴډŏ#Ga/s :Fc./g }h ^^MmCBO24Ta:BT1u,ў~uMá5UU{PG:fe$P$uA:ThȝJ@${` &h#C4)hv=3% 3VG_g82%gտ?$utQ~h?Y5f+/)(¡N9zR5z#?G0 ƃfǨƏI;0/By/ c Zg\j ЋZ K.R̘bF,'X]ɢA@I&mdٜPۄbzlPI9$ dy/BI&Q#H,e 4RPQ(EM26k rFЄoi )lJ3P pgr9B_I4E(&*0s,`tf>0 0 tx  rBUtxL<'lQ[& >]RY$,=jBxMVfXFePb +Gl2_8eaZ".C a~!>aGmù٭;.K0))ۛ˞āz9Ya6LϏnGf[[v3oV篹0eװ6S߱ΟjW/z_G[梍n@Zl6ˣd6 zXF -Bd^{pO>;o3h,_MǛ=;g(z'0J H3zEi1@kEgnqTj1X3pA+d7ٞ/I03[ر1"j:^Tk`!X`ONf3=]y L};vr>NN ec:jήM3r^koZO'w?jbiүTaz*A^JлR)8r~pўpm5dfUT` X"kf.qELy^YgdJߍiB5t!jBt4UTQ W^skf--}}^`P eaO h&$%}4^\—嵽-EU*@UOp nxsvD;˭;41Gµ!&ñyȧSomƌ56g6˻n^>j&O!W<3k.F~$k.lC,->8N a)@*UX bѴBf`WvʝE11dp.mcfQ=MO_yR}puzqǪeRE|a5+{)>8ՃiQTywuiTRh-AbzGWNQWφWVIr~ckK!ҝt7Ml{_B[tseP5mlEI΁́R[+}1"˖0|k08js'7ͮ9N^g h"8\)Ԇ& m6Y!އd5ݛgR`) O"D)2`I#R;lVJ9پ5ZīܦIP@C__**G8@F &n#M&Z5 =𤥕wR=.:e69T; R{&H6 ?#ƁEJ)jVZN[Afֽ1MM3_5\P,|^"trѣR S͉ hL Ǭ)CYC١Tr 5`=ebֻDasGW%OЪ $zD;JGVu+p6H"OLs@B^`o'V2迀v/m٩ 䒐Abod7*6NQg ю?VO5IM|T%3M0질{kz׻Mk៶wO6QF[Iku郫pu7*;(fj]kX*[= (ٴ9#Eљ21;;ƏP􍧍6hW5o,&[k;OS|VHsM+.CF}!2]L`8 /,ތGcyrT>גP)3[A _gGMMM-GkX2|@$3yk_2-'0 X8P5$@h2ɖ&*.+t{*H1L 1HU t]r}Cm{̍Xv$E WG>|HN c`ykQW%cZwd1Ŷ╜Gwr ?݋.: z03nbsJvMl,};}݁5d?ā0 {J8Wd(-w 3g}>:R3vVn˅V\"i I:ޢY~V<*I(Q; ݰ@qrow(qHRjyt^V4?SDMO.}PLn9%Iђnb*Ay{5cpPAesa3A>[!g2l~~^8p?6+__JGjﭪO DWMf]ʧ]xF];3ե^AK6_NĎIsԁr"wyOMR I;DA27]ݝnhIR۵w׍Da<,b !9>/VWDo)QR!Rr5$jkڸ쿨꫿ǘmT)vU?Ѕ/Xc.Ƈ}  E~LNBxq.r*%R0r`ϐF184>*0P:U;U7-t27ip\3ੰBD .S܄HX~&m5ijc:z5N8ry}vwԓݝErhbT)9t2E%Q{Fi QK0".V0gM`gc7P|odWL0(? JOZ'g:@1Te-D=lA֫ǞRN3W]\縉9]ӃTxX*Iv}):SjOk{.0b":i|˜Ze/LF4lX+ smz7~Y&?`L`]i㕫+zG̤: ;X%p8vrOדF(ڑv=>Qr~sw7hCW0 T;5I$"@VynsGQK# ?֗FK.+S7rrA1&qǓ̳?cbFLUjYTk~y||tKO>o3q?n7㏛ǵד!/=7ٔV^koZOω~@8 ׋& go%dB䨲T"B(VHYKroO4<Ȳw:|+H\H#LXТJoA=LSzZ~\9uua)n*RDQ;E%*C"HuEJ;6i;.6h\6muԼ[aE=OXסE3zG cq탉QqcĸpcC $uU[aʩ 2u&`4j423>74-\[29 1zfm)uD-Y)3u 6Y ,2ʋ .Em#_nR6k+i6Ţ pVT=ܤF} )$4%Yy@,Ù99s<3z6K%j0Rpd!g9X +'K-\l6#+QXR*4X}9nAYKW`E>!/<=]/? 
a7J WcaBBq˩-ˀFFؑ%6Khf+Hư՚vI [h,Y 3YuK`t"QBa!"Bzc1,vdu"8RRXf8Q։Y*HIn Ǐ3vɶsFR@9/r6*!zku-,^_cXrKRS_2o/֕iTپS[CFFp߽Fn>^L*5c1ʥHn`|5^j|"+_nJǗj|Wt ՏB-ۦ&X~-V9WO.ΰQfIu.Nvj+"AHxTE|W=ԫdKUбH/X mWC Gj#N#djN3\JeJZM w15x[5!0Y+W,Db|$r6arɬL&bUZr(#ƬfYZZvk!YL%s`w:Ri+ie$M鐃E",8BiE2RqY|8CUlM}p%ދNaJ\o@VPo&;"Yp`S6~y?pm^ |[*8~'Íc3}qߍ-P=Gƀ"D2L%M&=^tDZiw}=-dWލsL6!8|9}UMOAA֩;Fx.Tl[t|,ZruU_n2ҭ.ʰN1H3,wmVвmyc6eG^& 9-d=:q'dU2=DzsƯɠ>+kGJf3|FOxK<^`+F۵ D٤{qb ?Kn;:/LfkzɯѧM ^nFFG{0KW8gBsV!Ro$+:?=Ch;)]5Ua)cM !؉Ȩ 11( \ttfm8[0{hS<ۻãQf~̹sz}yEū˳W:o^_Kx~zt9Gc5H`ȥ9Z)fj@Nt D' vs:.ny=*^sct<8x#Bz%&׊ƻ_atóp {7cHv*O/qS4pw`EgoYh}\/fx >?}~|?^ڬeYzlf~:]Z AZFG!Y,MTXd<đ4)Q$qLpjs~tqz=yq˟^u2`u[|xm}Ԟ:wLCwǣۗݫx% &-FѤs'΂~ 'ҫJ!h9Bwovգ@ G¼8qt-@eztg3Ja\W? t9z!:#kz)LLoޞA gaկ`% G51У~Ϯ/.9VmW6 ^)1FEe:(!{^ل$y~uv!oofdcNfv^6rET #s-EO_7EC?avii,kac{w0ۃ@oVc{owiI=H+^ *I,YHĚQ߰Ol1E)A޾}ڗE{N1߄e:XMqB %LXilj-ˈ%8:`$jæbv[ݲ9^ʻ07 BÌFySF@v ҋ4{흐D{ŸOa[j%q`ަ`y' } )6ڇ`ܶ/@׾2w2|@'̭|+qwY=3Ԯ[Nq'$mGMcrb LD#%bI`%q!r9HR"S$TDGc]Z\bG_+3 `S4@VSAr V$@]Z!Ұ YZNR@j_1 T(&-+UOXpZN`V)+)>5,!/ɇֶYՖs3#91SƁ$t0,Mb"]Ilv˓0Y9ģ5]cóJŃerP_ Ivmm E$Gd7µIؒPUdPA('*C P$[W?l!4*ODLu4.P~K&@YHcܛ&Cwi2HbHtQq ZI9NNDrr1r 5g>7HMO|?r0<9¨_pTC?SBpehT9[ :A6[Oзs̹铷*&)8/8h~1*ׅp}ƷjR] ɅPUp^HYeR?')NX U;K=}V:XW~<潌|B~мj!?'v^|FYL=3 dpcA!rBP~/x81sR2uQ;٬J߲:x1bװobXF"_֘FnΩe+ NaXq|KEp"z2B6׌1=yc2'ہ@"J}9?NEJHey8}4W9w/VAȧaœ1te+ha5ͲTz&Ldi]Q#|!iz=.dPD=A.lzҳg2aލ'^0uj7^E W# 4)lڍ[ZGȽ%q\F*ϐ8@fr3@ڵ ca [D`@D]g~G+^=UK;Cm04v| msOpn\RӰ3Gg9i$$-Ў7zu:{:Z>@2hlc=X,zfXO(JD.pqa҇j˜L+K$T@(si`̓lvݨSͮQTCЁf`ttth&~'e= zӓfDɑB ŭN^}зd[]zW\O5$;u$[@Zm?88qPVځɋ {b{ Nr.,S:BQϵx>TwDF0%{4?6hS}rfO,ə>9zz@ik/ UJ8PO}_* Di[JH &n) uZGɞk|gޥJyו*f޵L P1RCS5ʾ>8N3ŞLI-r8[-arU&Q>Ǔg#N4N4g8)ɼڢ@sN쇹T7A1&]At}"oCiKȌMhWN Bh@)mLpb[&UaJ|+2-|%]a >%2f H3x$AG`er=/`R0,e\i7eoSD3 X |d #vLTKKbJϕYx?ϖsOVvSyIFߞf ""otu0b6J~;g/,%'?UIg}sc̏32ȘŏwdĽ9w\O߹Owx]g#- *;mL&S F̽'y7֌ό0"vx=.-ARL;ɈcW5mFi"yl IF5r8d4l.>Gg"URbuXw]kҜ'k#P^87 D %4h EQ%I ڑ \$5Qu%r(ZDZ@ZqIY?E{b*Ji=ԃӞcE|$@$RmqF'5d!>*cPH3(8&5lw]M'"F|9p9`ctdR*Ay"$tdxi &іRZ+q3lw%3[wf[݄216ChD#]!-3[FaPDD/il\iIl{; W$bD~UwG;Z܂1VrWp].?w̺lwzېӃdc z}q׳'FA3I\ JÝ=[e~9wy>'`aI%iznOjڐv{jx濕K7WWw*EfȮno;V`'(];XKFYCü;gD\C/nɽA"3gʡH +j>T4(}<ס$5BЅrDty2.o>DAɲ練K@aH1=Ri3nNo|;>N@ \x}*2jjaON@Ě#2!0568N9b a~u@:+CBK_5(P1&-¢F:N5-U>=eDZF *3!S02$< üEjހ zA(.Ln?tMxҒu? A;)dH j&1@R؂ߪDjHah`$E=p7˙4*#OFJgN#xbwZbSnHAK = V2ȽPX(.wh WT\^^}g  c]={PL>}g` `Tͅ kU:/MnLQ +K1Ӫ$ a KIM,BfJVZ+ؾ%Ak 'AB YBP$Iު9WrA%; .;$'uJ>vךhP^$֔qHaA21p(X]/ z/1XEnR9m.OY\衊b9\+/ׯ^uN`Y]oPFjF V~J|B*_SWw- ;Z% ;n]/9XBzh;Kx9mE/pΒX}r@[`4νWG:怎šr@+m$GE`w2@=,jjntMϼL2mWɖ[A $,RCR5Ü/ GRzrxoUj<S0pDpoҷ{c@ dQ 5"ۇ HQ 9QWɒ?j=NF)bHV@)GSMalud er!fB(u&Ւ y !Ϧ#&? 2ewB64!߹WWm립`[*!֣w[z@ք|*So{uSRSnN;b[hw-uukBCsЩ:T1 ;u~)3d=,0! n8<=\.lwVi#ȦmZJa1$xYN8sIuOY%s9`UFȔ&.Dnԕ&%ؗ%6V2̏#jD\eNP _ u< 2+{s)K47\'>xn*骩R$KC0h+@Pܡ8?F5D59!b\ J$rqJߺ)ڶTC}>.\i ci4{)2b=3;orE4VħJ}pvr0|ZK2Df_? ?=6c=<;=8i[S]*8:unPOmyh<> lŭuV |2> \m4-syDb $ANa'wW0Wk6;T!'0\4܎G;zw\ߝ/Xx`OLx[~snЇíK1,ט%YhX ] ϰkH͟\hTD)NM\{FESSJ̻X[{uH9|ס5mI3n%Ih{ ˡ#u*854]+hn2uX*$Q@0( C/AZMXWo~V5"V\R0ѮM||P FLH!cb֗1 iHRɑU8eWB%F<`KM zA٢ `X}7ʖvdr(vp~2ʻ 퉿JUΊqUwC! 
}z55{3EnBr"[G IvՄ?ݹwTPDCԦ3U{Xٹv?4!BwOo-b_8ԁipޢz`k(K;!7 ՝ҟSƉG2w)Iʿ7gar6Lc-[yB*l@'l?̶vD-bgy7/]/)k/]н|zQ> ?"w<>_tsnrc-v_A0[hdڠR-궊g'܇wu+P.kQKm%8V a)̼qg{ C> тQ݉}4Y5iX8[a!ۇr6 ?Ԟwqkr[,Zrj!2\$ ;F 6Ul0j!l'~ыf?ۧx9Hm9ld"%"&=08!b?iN/ZQHgZm\x-&w %QjB4JeTEtIkWpicxAe^Xv.ǒ#CJ5iíRBk jJsqhQ.e Is">ѕ<߂1"ˡЄ2l,$BLV`/ 5e*9c9ᒊPl%hfc~ow6&y)RIQMNgr0eLJc*UUߦ[R  kh&v\\^l1EyQ9FG.Cxڷ3s DZlf^^F d?=LSm*į 5&* E&_k95Kvwu0}iYhIQ3Kd 2WĴ@آl}0oțoKu(HPa7)˭Oq2PGQArW#I0yKٶTaˢ䪭!gO-Qo9K5i }gaK\2c/;K["{0 izTUW,0,ZhW^.erYH&:4Kc cKgL!0oiZ¢ЕX^T*$M4!hzb{#Ŷ Qy63?{;AI듾c񵉏3Z;}9v$%l1@΁< +p=YvyK2"D8ǰ1 1]L8)'[' @0@5s &X3º+4׹l8cV<"wknݭ]knm1@9gFj05'Զ%&\hY &d. ] ŠUe /IOZ,W菫; VjgClZ~Pw&Gk8-qd3e붨`ml7Nֵsu휭k۹4 Vsd!Nɕ 0K`FQXs;EI[bdT6a͐G3nl9)mcՐN꺎BA9.n匬 q1~sJ7SPy7 G/<O6nv̓T`Ӯ9LH.Ro3@`ä8ޗouL#j%"^1T`S7zA-z*A+@;-2n`k34#4'n:|]Xۍ,eƜџj>2u9+ʪ bSj2gR`Jt"?hk Q][8^M3bѠ&lZ4 z5W7ҥ&NZLKNk u5 n 5s)&PQ!-Ӻ 2 wtl E,􆻶wo><${$'+wwk vwcpFhN16RTJ0e$+F֑_ Dцb_y{L%V0#<;乵 RҜTD[*eUdsfԄ=ж&~ 1P#jbi #aOr 0lh4re[%;D-ޮe5;5k3(ˉO5E«X\^;LiVdwaqCNwZӈiW_?iNpWPI2bB-Jr!%)L4{V۝c:m,A9#GM T:!>A9D {aXRJUB)(Dw}j3( P.wL!a)AcǮg<ۡ Ivטu (i'ۢZ o,/K"5fPс. vI<4x׺A~.O#3BV_1j32.T Wnp3#Lg 8MvQW=y=\{@;D9p7!߹A-ƹwңuK DuRcz2 .hw-DukBCsM)IO~mݴD`R1QwXwbD-UukBCs=^$Z Aӵ?DWFTm{=^pnv>=Ɠj_DF?7许\Apxrafe$sхheAg0u叉$A2Coq5dtH XeA>o.$/nf*ӵ0zތ[?rYp'kP9L4y R(NK_Mko=L|:y*{cb[,ŲZ$, C'3SI"I,P ?|B9!PjOhh"ϊFW}[e#iƢ^Ky; "p:ĸaDd7[ɦ^ZoptԺ5,ZHѻ,p.^/^r]PD 8zF<\i䅑DrȽ0[E ꀨhcXA*U .)7P牰$%)t; #֠/魯ʹhlxú56dc9 C;qԣs|RQ+[BNIU>x 4(XRO= D V!tԄfOSO1EͳҿҧhcC[Oq&Д,wQ935Ӧ8Hg}ٍhh[ZP"NGc(A$}VU".;/BOzx_!NiPv f'Y۟Ű̬O`8nE@1ڸϒb)tyw3p '{;Q 7U}? _F81N, 'CڝpЫ@,#6H>LT`ATWeVnd)(P{prr?= 8Uv )0?V0y{:XJN)SFَr#]ZHR5] hF`]4C}EF|Bה I_WTJttJ#kEE(Aa׮Iv,E_n`&IV`UU\q{"sWjEO)/]4n3C NY7`|U6+c\Ͷ>lzJi/xLӽ79LҡX*P97)P*{켇b6&M{x36_tAz5[=Áƍ_Tϙx īl ^ei NmS`sF>8˩JLUL1JЋ`KS5䏋-4u}Eo*SH1۔6m)8B/r e 蒿6-;4-ɺ ~ŔŧkkoO~fsYfl27on XP8jU. h,T9 hCdDr6? 7u8"nNz.1+9 ),Il(hbȸ%FQ9\Q EZ3f8UiHik9O䗷`n}$-bk]ҭG-3dPWcΈxxxx$bӐ5Dq*GЏ118Pl3 %u(R|  ٮ/dv}y9wy;(#Q-XmHH"!pʼOIcV 9bJA"5[!TkyyS bǔ_-к(Tbw+h̀=٩Y@VHcCI ghR呙hq4q8bɍsDlW͍|{dCuncLOB[H#A ɇFdyf[V_mpqRwy u)1? .[Zkfh ns;LJ|>Opjbh3ču6 ޕ:qݠ&), LN:Bc2Yq2X%(RDaFQ'4ƽw67¬lV Ə3)8YCOIґYeQGY!2Pŭ3g:D0O3{|?Q̝4^:$6 D'(?&&pԌ:J *z˴TIӎ@m06K I^`1^? }M-DON p"bPoK0FrOCd74 c{U/۝"Y¢xyd]/%n"zDY Lȝui6ZWlẺaZHȅcJ} Y06KuY(*le<ٗ8 EKeS6]7(E uPVw٣/B9, N EOcacN2#SyceckpG4\jZ|60Fxίk&7`MfX$w;ۧAWKONkrnD:d|za|\=|i<8?ǵQzS.za`->QoǏvnru*w>_vY5|^$om2\峐=EzꨴTJ&HȨU.SqŬv8%nlcocFNx0DJL)sv,т҂פ<'4NRD40RH lRz"#DDA&Bdg薅(L9c#Lp9wYDBG@h4gi%Di9=׊X 3)bF,a 9, m*EB8%P'E#fJ!fɆ{MMA  "<+d p\>FlBSI܈uAg)|Rk$gVr#Õ%JIrO1byh2/7: /uNw0THtMT2E#Q%uf#fE= P%:r<!Tjl<:}i4%ft͛uxfJ1sY5 aXH$o7_Exw::,CڬO3Wwsc{CVēQw4FT&v>0Whe¨΂n;B#>V!.Pj~`,x~Fz;Հ^ўH|CX:;1h@j;fWƲYe; %  'uv4RWs՜m hJ eo^‘SGkEkj)/]n)͢x~l3e0/ƌN#&$BG)!rHDI>|Z+L2<`8jٌnrQ. x2dNA}~\&>4"` G~<<=/YAu{q~ZKd7?5Wc\^u 'vGO_撓Di˞0A(:ꞀO! +A?"SB4):SJ(OK|du9w1)pl̀Z0֨q:SY]/ˆZ,dM>pY8}q CdJ$j&-ћݬ7"aGQavi24$[7ޟoni57A>S4Wx6`y5+!NB h{,.;/}{Vg?wG# -Vڻ !\jn1ՅX~skw}f{kqqֻ f)ȃ9X$~ID WgA) Nx I"HmQOhΓpRe5~S "rMDk %re!WFzAFUVP#C*AIP˳F~-,l8ֹ]MoxA5Fg;?.{0ZlsӍI\qn|kt)<7}#G]1pmܑHGBqfW!T7s\*ӹgi᮫nw#`=j2'$ #YO Sͱ.j"kI6D y^F("jG sM\Nu4!Cx!*uACo!RfJ.B= ujTLr=ZϺi5+S^Ⱥ_5LYSjV]M",u)*vmoGwM>}g*wvW;:,VxLu_d\|nBޯ<j`7:/JF25*mPZA#1i):b k4ntZ:;@4I|tP&cdy(l%^]yuz߾~׳ 8&U85T^Y2@Gu'mN8Ubq~NZtT[%Қ'2-Y je% WURE'ytGG%y&!v ({nBۿΓ!Od]""pduyqDŽ z"m3@9'T~V!oK> %-= 95b7nN{Pi~j!|duE>+ \Zh! 
5D*8oYܿ5F喝[ZPB:>RɃݭ eIrd5az?{Wǎd l`^IfZZJj ',RU*%s!K|<=A_Hm~3 `-X BmUe\[9m< 0@E9Zpm*r%{{ϴ׶bogj\$̜cI7_N^DD, Y/Rg4U.N%(+_K]̓TM:h(J"jO"ݲaG:Gݶ406lㄪ44*FQ]DJWBKHQ'6pҾtVR,4^s;vذ#`?my647W8g|GFLV$s\5Bo~uNRM&(Op{J!rt]WVi9)CU#6+25SP3Kg#%1TS\ ȳ?䁺|wHBr7puj;6=ESnєÇZ4=^O߄d`u/ll @JUT5ploundȷJs>Hnm̽fq^ެZ^[ncogFmL|I+dWMgO ;gTMcw[m|;[m|n'1orUꄄҲĸδӳٚY=f?Vn{D|%;[ѹ_ ? hߩ{<7mO7?|o@)<]N[':&w%8G#9դW?LJGmNpꍜ@OM!5t/e8>Ads Q`xgpP})Sv0|P FBnv鰚cQc˭u+CT[/gi-(fFQXL}:m BQfb9'B:0&:vrSCL yL˻֕v(*W5 Uddz[+bc<߫ OZ~H~mTe]cHN U;>L uUP贴FEɥoF":9x@Gi*9P ̔>%{ߢǣ)+ȩ&fc!K`*1Yֿǽڝ! S8Qk۶3B&s / -E>ڛIieggiCtRAזd^Na#5/Fț@`BݬTOWd,>i&RBksPRQH1LqYX uު{0S;WL!nk'îyqa/H_޶k B`ȽgV ; #bv|SvЃotb ژۛT$jO X[гk<"|ѕ ;ẖq,}uPq^2hjkŅӦ z/ʄt!NCą̀tbz*oBOl)Iة|IC%E76͝ m㧋)A2-'0.ݭ kC:p㴰VKn[}M6E|is.M-țIMOo:OmwoPSs{unym6nN8N}.h]Dk7DpiίvR~3 ǿzŚK"qZL;+HqrGVې]Op8kxGs@, Ђ1UmiZ# ]G4 #\O\;LenGv3v[NsNs3eQ 5fK$f"΃)1Z!#Ӱ<-]((CȌ̜4QOi w|CMk=1" B=Z1+˜D4krSGxZ"8u u]Wph\klz.'lW㖬Hm!6] DJX|ˢhU" hў/}bBm>>;8YGK1#u*+ހFcX|i~U;p&Pf9N)f'kBm07%- } J ,hXCjrw2l#4Xmy;oohUߔ r-]k,Bu_~N9ye%k lfB7S60-q.+h'c]deH/_!n?r۬ ݶM dlrj)g_Gi8mrB!Tm0'+[.xBY{ӹRx.`WuR3hmX9M}p{ t6䀲͕b|ߥ΄l..9؃yOg~O9k"\[V[pG_ps|*ǿOYU"uWne#xҔ9rxq 9m3[ӧ\"]*Ȋ5{v#ZOzmX_΍okpb5 h zRÀVuL di\ kVo&t%9悪Ñ#׍GnXK9|k[ZK0r\wO~) ϮNk*y &~3^+6N4s޵m,E9!^\7V[Nk @gF=8B3KɊ86%S A,"ٝ۝}8E+v菭)&TKV2HnBv*᏶7haJ7se @-չoIL(:{7cWCp/>0wtę֞k=9?XEI v˫![Ѹf栊ZT"*EuJzzܸ.~ׯkE/!4\Y Bn2`*g~mY}pzƤ>/B 5c*4 t]fv<Am7| QEu#V9{NDR!XC[∏Ar16/,RR"\ix9*j>s4R2y/ U j5Ff46 phc02P|t}opy yg׍axR*ήj5~@rӝC4hg^~[55:(" MdF1jMZNʸ\pbVeNb _-áG[ s%R?%!kauH$h"b"^f@h& Rib:͂$cL n2)šjn%<W?GS옑_>RBC,*l#QP9%^iTs3zSHwh QZaYYY",ުpzM[lr%[,fR=8Bb7c_|%.@/۲MV6+PJdR=L IOȤZIfcˤ/'R/X0r.68+g ԛ l_@-;i7{BRQz](]M T#EΔc!o+4`LKa%Lk7S;. ;S9'??og;n96蹫 7 rYASÃI`j9qu"荈x)"(ℴz A[ R&Fj@8h0kqc64NZPS Qnl@+gLjJ3"RT > 7=3EƽQIIL!ɛ鹛Ő*z|L|!))yAe=͜rso*R*[\|'F<(a:Xa:v(F^Pf8fn}ZRגB19VH(I !MhD4CgՂ*Ϫ)Z&\} ʒ!E>rw(%>U*/w- u1;"g.9^- 减U\ZVߍy~sȜW E6 !ȚtVE1NkeT+fCc$*:٩%v4Mb`&8 *ь@ !GPJa`Jkd,cW-T)0W-|VZs/QBߕz1gy0υ;d%W-{A; E%HNL'-Vjm?m}n]OmtgOhG\2U*'pRd#%8mҾHbQM<8A KqfaBYķqBۃ7_Y &4e@`  ɌQ?8{~h5ztbYLfGǻ/8(~e?mis~~ioh!s|ApwgvڝhvO4aPγw}j [ûq>("pϿĝ[ɄY3\~jG?(:PB卟\+[̿ \Z1@ Z̳OS۸,81I?M;>td}1cG^5CNZ?`W Qw0蟙7EwQc_Πq7?/+uNYo;&i_èdeۥ'S15 ?Gw@3)f4񳃣4hT[4h'tQcph~y4c0qQ 'i~Sᚄxƃm^_43}6^|Sݪ5|Zcp;O90oYfAEk7CÐ !|T @WNL3c.dCyi;E9 7Omϊḷ=,ы~ߟ҉_IË(L]v?/=t;x֯+ 8z ~& 럞u1ivl{nܧ̻{g CM24kkZIW C}vI* F㳩L54?^>a!/A'9d6d6d!uf/Ch31c^LG2AȽsV 6>L2m5B?}eSXV A RfOm3pM1| 9{U\񯶊W[3uqH%}nHY pDelO4tt|E+!2*M%ecB 9zW0Rؖ%_cͱ$+kjAZGw+6=4J56cm# lP f3FDZ'gWj揊Zj曡 )áU(K(K(K([F)C hqԕG甼LMTѧ.X})pI\ձ94Au"G :"5sS:eqfi?c017 AGS0E`*bj.]RF}  Utwxby`%Z^O7̜/y[Oً0]}}'HoEIx32O{P ?2t9` IX$+$|$-ށS<3LHR)%ط Œ&\"|cy 0oĘkq^e λQD%52u71ŝRhkw$x"@ 䛤] t7uԫ9h-ӬVH_b%L02#IU{g1E#1ISD)MWc ol to78ª3eyYʃRD <{ fH-Z<+cTDurØ%CD)$^rm q4hһ{%LjX_(wRr&jJx7q!wYoYoY^Z|s(j!BƄI]Bc&!~/+*wޟ2ձSi7:Z\k+[Y֩:YZ:KTgTkWZ\I \~mWK.ոJh3zBBمdHHb((ΖI1kyh}UXb~ُ1:<]xTw$Zu}+ y.#ri-"UfЙÎQT ̔MaE{"JqJW n" `N:Mg!$U?i&kxYK-5v``yFҪ.l N3Rb ,T#A[yU Պki댅/iԀx)JpG&EdB[Mӂ т ##S ${1PoZUgi[%tf W|>43€`1*@i˔QDcN;h+D`  J${sGC@=WbXղZK/He 4#G2&ϔMS0ھZXPqЉw<>>JeGL?SHJ~)*#SI:hKTj~F^q} H#{-o53OYgۃ+VC:g,2ṳ0ѕrA:LL>I#KJFkL))Ò:DJOlo,͐7C7CV͠~9zp[,{Qa TyߒEkCȲ RCJra{@H)a&RڥmSN|S~0UQ/^FP:; -LxmHf SOtȌ tP4 Á-Zu&rkC1(0( qJƕ+Eɻ,[F ;,NXb:w9͢ަ}Q/qD0KM[s[Μj]񬾏8 As Yneg}[rUPѥiT ؀$qrFmq4ԔxRb?eܚۺ5i"I@rJrL;I9g"2^.M}+SL& $)qȖ=UJQ|A_u:apv6,sc3 e٫7Zn7^6^kFݦwCR'ɻ4ZK.-KxK+"}) S,#0IB`-dw/=*dEk"ѵC6+>͹IOF5Cy::eWix^H`)juM޽NP:U?s_f]y97<4jg?-f꭪xyt6ߠս^^kP$8ItXzCyV`xss㒏n\hwego>={3(&^po{Wu`ev>Y9cgkv>BVN UbǶJlXt=I,q .֯a,ZƱ4GK9ZG4>o`H%O[xn,K1aC#I%R(ڙgS{W{"0o$BߓDXbV@;X[1pߙ_XrS>7mho mlCqI\g G.66$۰n_ڜVasmU N?ź;r-#-\Je 4$E]TD73wO$L_9}=iۜu]l΁(`{g Tӛr;AѦ &^R,XY1/ 0^͆;m87|]Lg 
ۋffcdwƙ?hsbЅq%lo5FPZ(ao(6޺ۇSl\J.6E(k>&qGot_Q(59j\ʎ2=qf֚W G1;(^^~􃏓UFhǧqϥKA?ȭBAVn\/tTr\˵(;UnAeo(Xaųójxl΁rW%{|/X*%%~qe( ;iJL^+7L?ؼu߲IXԷ1%MؿnyвBydM =FAJ-ޯ`<5ӲWqOL~?0 [>6(?wi޵AхefRp"f(%7&hRVlLЈ9_ʂATؖqj-Z !ߞ0j-Z ¨06W c +a?0W c2U錥ԣd@sjNLVַ*71l!r_0ے>6qε1&pg2%)L̨wN$J^J@jRPRJ%[Mۊ o/Ux[m 5LmJ&{etRiSf t yp%/ڵeRcT1;f#*NeVQTv;(kʶb\&'@6Gu 8Sp_h%ThVѶmEۊm7+YmmҎtGە*4MUq] vK^ݹkMʲqk{1PK8q[H5g]s5g]s5gj5T_z)^8mj}IzUi镽y)tA}8Šz*pq$}U܊s+VO(R2cn7zM5Dtc!3zv)Zc]S۫@+2_AQ-i{vmc @O[ÏiH:+ώa opᲳe^@2L22- B @s2(SHkPv{橀[!n~ p+V9(W41_/Xk2+|l8EWa^@R~]S/Z>#  Ԙ &ƤDt9( 9O93ʠQr)%*hp Z3Bm2*<$PO?6?RjiW?[>,ΐJWM9& &'>FagiOqO _怸G=n 0E iyIi 2Ё(Xn ֱgj/ Ay=@cɩm2. Q"CB~6+VXps8@na,7oۼ$4$QDˍ5\D⪒> D`{+83.0T;5[w2σ, דO{ױ;µ,">xH$Q$S[!4p.dJd! h3X 7[:&Dc;Yj !ޝڷk j j js)9UǨl-|/Ai`.6XV84h)װKG-n?SPwW{5=E/ {<;CdyYpǂ: IdnڗRdrr}>u,h;2N%JpM%Jp+V sxYh if>-$ܜE$~iоҋ@]pQ4 wf6^Zo%;We1H{G"eV:67wg)\R,$762l\ U^SS|-=gZC@tErJ L'"ҫB NDȵ"Gi "C@2NJAj+ᾪ%a+U S|6?eqaF,ڳI1hHgi`e#-AU<ڸ{wS%UWNӰ_(a/? ^gXlu.-yͻ=Og枰e71<pޟ 5nkjm=eN} Rgf*D`shؽN%MFLQC.X JLP2l$du$"Qفѯd4&W%Hmu[)xn,;4aƨM]YCkk۴ kl61j#9:,At  (`ӊgz8mur, byk_3 hSQևe)<3kq5$*bV>+0H?ux)*0BXG1.5dh׸%V xxZiC6H:va\.m1Sl!6-X6%Hj>y+f.0:"b|-fc~SEVb}Yy *7E?4#J@F @fR6ņU'̱hL>!:ƽI T)(RR)hRF \P̪1)Z)J^㮊yn?u> N_~K>(lXqN7j/z>9c=7.}3];jrt^# ;;L`xs uJ^ۯO RO..-IHpe;gaIhFs.8\p`zRjxJX3/W>]e]SQmA28[YZ4ن?l$Lhi\{'pv 00R$I$!GjZL[IRDE`Dq!SAtVZr'i'l}l-#A+$uƜX'Iي^GZT+\Y)ʝ q݀MP2E4E74춎hȄG4 @"LDL*0e#ڍykREvBfQ,C L)FI%ɳz,Hj'j0"- ]iE6 i-?{WGrJC/k/PȈ<̃=6 /ZyJ)C57xYM.w ]ed١P!uhINF-o iDZJ#]gqcnؼ#h(e ۿqr| l>'ReW7 ,`k(3l7) ͇X(uޡY/{t̾Շ)@چ<#xy.ߘ[G7jӇӓ\~rL/:!{w5S&e<yW4vЂ2@њB)385DW64e57\91$M&iA7.G % /;7Ɠg,yw\'T”mw į﬋! 춞f ޤ> ޲<0 ٜ N2ɴA zS+ 4-6d掻u 1RlZK6:{pVS:2cSѐ͍6hǞ`i.pDNOQ,*h07D&3 }d{TaI۫- %X3.VN>GLc֒]&Se }YJƷe $c0QlP<N09C* +TIa;$MɥV28#;I-33=.kM@SII{m˗_X(a+c sf(ؘ䱪Z4nNJѺ(B51F&Jc@UœR b%<6s(pi3[z}x}]wu1Y'- ?Xܽs$UW,| J6] :dCXDt [3JAb=D,Vz@de@`uɇ<S)X0rޯ#fCr)lSiv{%Є;@CwDA:` Sęeʮ<|3Yݲw9"^g1O09}!{򬨦vc7o66QN!b4V2FȾ:~^ҾԱUJ X|tYVPq*Z"ϼ6~^@qο<-=sC*ݙ]")4v!ܡK2;%Q)Z%bJщ3Ηj!_#V @ie+KEA-TNϦP{\)X4˿VlHA'i 6_YTX ?bĊXeŊTV A?Z$S{j@II -(u`9Ӿ?FJ؍=LM+d BdwQAyz kB Sii0 i6ӑyad~ 2"Rs2$Se؏\!iH>s `;NtAuY0g9C!T,O-{jQY `r0Lp|: L)tS+iP6+4%u`9%͐LC)֌!ǖb;PV〩j+|&ӟz*J.Kb+u:u)c& gi KW$퀺 8U=+:S*Q[Ekd* Dj &?;2 b(~>yͬP]jw ۾Hb 3Zk0q5S9nnyJ՚gQSe\+r.)vCtKa jؿ0t|c1p3o^wļ1Iiqk!J5?b-[)B.YUCD`cY6=*+AuLY:k8%7Q#6' +8)v̀.{qa.TǢ?l1zͫϳ[>pb nz=PS ?J2rxF?`yI)5?i֮[$9ܖ7e`O~|/G[=VJ A(1SqyށjԎ0R%󔏥 vvrL6l>QcRm]Յ](m6%f%δ ʶd6r>f>Z'7ذJ̞M@uX  X`(=asB}nkD8ҌYYQ潑at=`rN[b={,&J*v9bP[- sF&FY@ZK?pKl|NY)VLM*IN29c)Jb`6:+R$ZT;Nb ]Vz6I^+iʺ:^}H,I0+ۣq +4.C; ~\oeR?_Bo"}bSdC=~Z9G+]o>]_3p5i[ím.fv۷o_c7Cw4m-_RHk@hW%Іuh[^52BkQ"!IG"M4kKDHث`~%Җv@Bi6]Bc.D @mV#mȑ(LhGDya55W2^2 4o\X'oW7.< QY5.+W+~!axV[2b㷯'~AW7\ka8ӓz ̫MOvC`=ƵdB n%^ j02p>jĜә 9NSȖL+.HD)mjrrI6f.P@B8JYy/'O/,svtuZkP䦾(wFL!KQђשb/Kn6:)e@~Z5@h9ܞOU82 }A] >ۖjlִPXW@ETMڤC3 +DTgz>M+5,u."ɔU@d{2duiI 3jĒx -j ~õ!(Oڭ@jZ z>-FI]RǖJ~(aؠ~Lky,}@L @RO&8H0d`޲w衒*O`dth*Zdt Iq•e0.uJd'-9-W v>2p[i&lS0)ic:(GS\`oNr</NF5{Ąқf#%&|3eW/`'tv[ey{0#>8փH3e}떌![ >^eld-{79VgܒI\-4Y.:gf PG2V=~6z*̲X,G'{Lĺ 3Zi2ClYrJ4;P],jEhQGFPd2>Z2MTD`Rl,ؒ0k =àҾ ]<ך33ѡsGLX[L)^u|Pf(9@O}aIɔíumqKҘ>z#jZ pz=TUQr[i~z!S?/T͹p96o0|"cbT#eoذqɶbO4v!?nP:l w=*֯kU]ZYicG. 
TAPerR42 !֊N hymIҐ4=)L߳wIS͵jl # l/6#o jM*R2sWbm]شOl|v ~{$36_,>e.YSGc,<Ž;|sqGr{\|![fia!,3w {-Y2ǚ'o d ʟO>kp __z{Yw%}şN~]],,ejy2ڢX䢰Ü ƦB|/{SW￑%*y"q7iH6Z>3/Cɷ,뎠mNEr]k"+}-*㏧)_9ca0:h~P\QYArȮ<(z]Yb?wc cp-ݟ.6MQo2@C :swI?~ ӵzMHpL+:V޵ƍ+bV2p&gyX`H%N|ɸ;_ݶuS-Q-Yzc"Y,օg&sZPMQ:Ō}T3Be7E~ܣcD6;^4/ {1T&[{[S4+Ty:+_WOʛ{^_.Ntu*n"|to~Gͯ-Ws^,?ʤO7'RR V㭮[fLx[:j'gT}H)JiM @%CUu8EK―Z: ۪ïHTMЍQDh~]^L `@k\ꏭdJ $ۑt%J0|1Z,FB(sh3QS*4|}35s(!9ikM@GD(|(ο))GP!&ty5==BgW4+* ?+|JnjqO|lUİH%'SL 9cn&TXEYdKydDFXZ-颇R:DXЊwwR$@rI:8a2-mY븡~9ЪƜF ~|Lg<+pq^}>Ϳi0S3Ǹ-p&?y] l]{:{޻{iM6H,s8 p&-uL(MnۉZ趝t5VsՓc\z()By7G*J3%M oM*)`JVyѠˍk%)䚺^mAIiRPrx+Vf"#'y)(UWVS]i3,r634NVC XU0bؽfa' [1*ynB GCf GGh<ӟ<~ߏ lc SOB3GEA]h5J7PeL1&Jwܱ1W!Qw cz|8}q#ac!<"u 2^2xЄ3 0Z>/2A ?=X3H7[8Xsa=3@ mz%gQ Q e~BAH]e~/9 \GVs%0z[=9#z} cId&bhQGQl'a=xs>>Jhk_Ү*i0be'|i&)ڍ GL٧l?>SܛY@TZ/(KBNr#M zعUB2TKF)/%Mx;bn>~ؓD2E鴅Uē'^{m!Tw;&[*}[Jp^IwDKU07rO|`jy2YZS_UwO x3aWua_zm#~@z#Yb MQ\`V2O q(ĸj8T rB-ߝ/債\[Ţx\I YAx3ba"ό7nۜٲKj\FxQ-qAиP^c1 x C(ЄH8"H -Mf f=`VeYHD;3# 1c%}E'EE-E#YS$dѲijdP*FQ$0#Թ[ҼU(Np+E##}g[]yִTvU9 K[QQ\1.RCfS}86+&خcöfңK4[1u1Z#!DH(.Gu> mo+ >~\)Z?~vD0.Bx幸a}8..;>5A=4Z>1^LDoDyŇW1bb^d"e/%Q&229JeRb3x/@W=@\ y=Ni}\=GHvڃi ̮ [~3`vփu2=Sh/  4W:vY*SJc))w ?φT<]ىYk%aߜ:.̈D\T_^Dl iy ߇7JIi~eybldE0U[;wW.S -MUE=]RT`mhHV:ESWA_zm'Vb&νJ\)C>=B#\ UU룢=@R @ސ2wSeBɌ*3JC?̣Rhof!?^lI#aX*}}i0H{8Zl~X0A׌b AC2 t5ryod Z 7yj@^|f-ؾ#\ iO\2o> WkB:Q5W ?_piR~zwyi1C <,j3Kdi5?_`H{imWJC gHEdJ45hZauΉC., ݻ ӑKBL}ix4D(2 gMm1'4 sfv./)\hѣkSK4ʨt;I[XAd?a"FYIYIMTv&P9bf8o9al(9V>h,t(m#qUF =>o떹.|,K:Qk"&Ew$ƑUdEΙT'p;1Z4Vo~9zpM&upNy8ߤnf1*PB&{a5uX3!($圯XR鮊D ?,oʛb碾FF1,MmIoK)s{OZ2vvr\C>}!_wt l<-wwx &J ͫzT+^Y>Z}$ЯT-&W+^^[֎W6b *t:Ix2R.IPs3%''m/ @gYӚZʍ f* 3YL99vfGJ:5uyHuY =Xr(#KBBWD7Rؚ]7aDLOӌ LiCAU~;f<0*v$)r4S4!#B/][P$o.]d'),=/A?}_n*-ȭD]SLbʘ,G\Ā`p^C!5 R%aKYHZ(m "=L 2qM8*΍gCJ\/x#m4Z@+-f5s<;Qi]s3^I+ך)xfA5<[&=f=C_o"EfR *F(kHXNXa̔ˆp(ٟM  e=7׊^'D:>OOҬrg7h"ITņ, "ΚEa=©*n_}3Y%&vw,_KBTcܧ2 a%"2ͥؕ̌cojFg˨//,1 pۇA2HcUw3)U V=k`rPQ3zO5<>PYUQDlQ5>ؒi3(V7 uqQWD܏r$UY޵.#WaGfϖ)w@{Uēqm&qřqr6ʻ)ɔ e" ͯFkמGaV#ҍNSnܪyym'5>\^]8L+|h?Yɍ20.dWpHpi;EH7`a.54FJS9MDe 8'dpأQO]BoQ7ȖW7&:͖BB ⢐xQ[LCfh0k @6h`+'C%s b]2b]>!!L S/ńYGϳ-Sy+KVq Jugwd7PUwLPbЦ!b&j=N&03ʹyfp Cx # N6&CШ6z䬍 ĥ73{~yoR64#nl{H \- i;Y) 9ͧ~[.`>aE(g/.vN%it纎P .> ";h^F0+ ߉XUOg]@ݞ5Bue6xFgM~64!YEmk_r^/O. J>߄23F# =g=!}1mxfY՛V5>OMqjjƧ!jƥy, ~쫃)ظQWN-7EUC̻lD]1\ &\)j ^ Gּaa9jH9D ۄ?!xOiMW$,'ijڴQcWmƟ9&#jp"j1!&+Ok-rsuhޒ16 wo \L $hBZ$]T 9' ] juA@ U`2$89x~w9 2Dː#vynV \ˎ&cB"wAէ B=)7Q/ԓs]N-7H*٭q8W{TNcA0@}úLrBb=,]X=:`*qwJ+ޭb8.QW̖B![:#py`ue^-%C ?^J"/(6Ê6!=;ʷ+K8NnwC0Ď+y ?#[XJxsAU)W8VǒX%BM.!'rI~sM1E\^j&jǨr-י=)x?܄OR62v|KA`ֶP)7B kw)l6FYm(f0m1\\ :)' >̔qPUՃTY}}2QfJSZqlH.`(x&Z6?ts>~.x%[W?øQ7nPB ᎤW~ 8I\a\" Z'VfQ\f^ɀ1u<J(;`SH4iS jġM}Io:CPf0!(c$bX*G8}Ƈ"=Rn>@mX, lQx6c{m% s%{x-s|(uGn N*϶DJk/z"5C.hs 8q m6.*/nG`ݞ?uDZ20RO t~bõ 85hpeV>CI 6r]\% aW!<y0422$[ʶ8:h?ZfOڵu8C-ЏcYk` fmZi ~x䨸lJZ Gd|}:Aпi;oO.^tNr|\nK˜MCDcϦ\:2 ONOL'/c@䥛<<@IqXϥ\^S &tCo;Evw<r#ӈqF9oG~=Oҙ~z?w^ljyQSwE׍{ƭi'Fe)Spy[S0qNv}tӨӆo-je&eF1e9yz!0wV{=*nܑ^ChCPz%Pb?&JbvJ&t4#dCwPgWoi5S'4A6BUUte>avoC{=h@ߴӻgf ֿv.7 U'ڼ*N0u-ܸ}]/i Y, ˟/XW_>xqK@(oF:=u=%ĝ;śA`@1?BUWvryZ8O@Uj4Kϸ]KR! g٨7};JIΖ}~ V'`t{6Ow sqXq[8aQfi ax?%Ǥ\"PgnQZq? ]F ODcUaT * i_У$3cIPJ>!\MLpf~83r+x }v]*9n>gd@  o`T ̃0ևMyPBQe -.r8}^nP:PQVG90ڳ;+^MN"-hްG]|=I\iw[]kfO^,G=1iKc=?&b\Olfb?,G:t.kz O^".D)#&eufޙ hE ~s'*b\F&mCnAEC &+ޏ.naq%?IN(U'LBmTXc炁| y@ ݽ}ec`BC6,_||g٫1-@8]iM |^ F-%l=WcIK[x2HoR^o˾@(搹oײ8ҤN "-@1 ykj% \,;`!"!e Z rXE/ )0_$s EQD+T3E`I%ƃT9KsK\,B:̓Kiޫu#:&Oc1CF"oj$GN"]~3`hv /-^8ԗ'k=do_{86Bckі/BUg[ 1{(d ,|U鵚 x?OƛWb )(g!kGn=+ IZKC$jK>-U? 
ax"e"5}_8~58-8)ߜ:Bԗ}mR.Hڕ} }R+K/oR{ }S۔W`A>EZ/nE7tػ4a~eeN_VO}56=&Ct'tNr 4&H%N)%QBx R*"=M  $ZyLK\f|\Rg&vt|t|t|tSB$!Y:e 88FǢRG()e۾UJٶ/_G>GANj<"98-^~P9`AaOE囁DaKk`*OQyL3$!gϹbs ڜRxEWig=<9?˄J V"g\J m6'b4u+Tv糇T~?a2fat2\ / k^I;#As]i x@>:$)Sz^eXӒl/cF 'p`p 'ωd6#B]! 37LIɔId@_!'H2R 7+2(ȧqHdY VZ\х!RWDž]х1I#}1Q+Jc I,Dp2;v DLpa>B&ʥelBv.BNK=9;43TGɒ(e^$sR2C>0TfwpySǟdɎp*5SHjL’'>?]Xх+v݅]Xх)D½Ċq( z*.^\APΝO֪OiN.fNp^]p1`A ] +zIp7鑴m)gAq/f=/ 兟2]^M bF̡2w2%!DBtvH ! !:2KPaboKHHYJH!uZ8W^Odߜ°hM ${G_ j)6*ro :c\6url: J[mgu,q(X(LD _SRKT'`!М4TՃcSr+V?ܭĉjן7h`s:OUwub)Y_h\+/_ԾN+P@a6Hcw1ZĜ :Kgޏю^ :j$>K`BQhT/>JB1Ŵ^z7|=x^ >TuJBtM)w\۫DTP񑎙<BIN:M A6;E<,Uf&!2Yh:70Vk(7!^2 [dpIGID;""H&JGE%jj~KfЙ3+MMڑ©iI(>'ޑKx;ښD =\o^=ѾT,Mդտ_ɪ <|Ӳb&uR̋ x>~rf/&g)-Q#Vxi}.H'mVx٪UbeW.]~x5{֟Ly s-<5{SEIz>*6tsw84UhfPOȖOހ{7Ѡfnu:--*׻}cQO}ja "di!جunRgyj zU󁵖nzj2fx9k2l&ZOtXvS ̃j/ͶԺ4 i9Gyi:F4{ǐ%S!fgkt&Z7agB"g&tbIfNetRU{hu8ЖfUN[%Z}^Q u]'u4bAjstQ8飻 XJ~qRgQa iQOͧ5O{m"d%D-T 11p"^ DJ"IfCAKsYjƙ<7X}-)@#&^Z=Q~FRIKgb\+ٳ|F-oGG_M~j?2߿VyjeT數GKS]{i5^C7#~uJxV,T~Уtوho,DB( ZH'4guEp5\;$0SHD/2R%Tp0q6mDජ&[A>o)@nʀuխajqKl,H4)E 3֑.# ,K8DkX4a AbX>9OreBJm"6j7A=bLR2NW AjSR,FoxQI"IR?DJ4BJ(% @nfJplC(D @a bMn%~Ԣf8DPQlj-O"huQ渍(M .Xg#7j(D0?S0 *9ʀE0ڗƯ:$+o/eTVSR+_^2u?iti+H{zw'La^Bik8R xA 1)xA0fRCSlC>mA8Z Xlr\u $rdU5d-jrXdQ"sȽDό)+ӕe 3%r}[/`>X`[S|. V+g[AɓjRo*wLZMfgZ)4jB_MR=WVWΕZLW` :Fz>+n2ƚO؇OJȧi|uBx!vŁvUz+'4%dǿ)MGh[{ln'귬 asvpnr>`aDTA xز5hݺˇ r|N@޲Vqw OP4T#TOzRsf?p4~n#) ZN>JtQ>6[sGפ +ɉ+{:.=[tWؒeu `}YMcj@srL;7rtԔ8哙N}}PT,PéJ>S+z2Bcd4QŠ8-T k't?oR)~?A[!E,ro->[-J*;v+ȅ⚄㯍OeʴUa,alWUY"+}#&m '=J(dхhSFJGb+_\]|;߮ǧ>z؅ >O2:F3/|8G2Y P`s"BNZFYy`c>)\Η(d !=zAW?%T }t*É:rn ksAy'$# ,L@! )GȎLA3+4mj *j*9DKo)zU]6AdeؔxRYlu^K!cD*ӻBg.x<{.k^@y?DtZ(2samL+ZEE?$ikW氄 2 ܪgjZO` I_z629 9I8T;&ь=Q-0@E+c;2t33ވvDc:q!fBT(+5ƶ(63L Y 7Ւx$sA%e&i:6XmZUɿ^'WQڪOV?{E\y^_~[޼*oz엯KS5w ^'4HGteKZ/KWp$nH02 &KJĸI>ƼeFBXjdJv}h6ҌlL8a{ҧ.eCzA}QךElըT-SLQ9ҷȗ\b;K3/5$0% ceN6tǓ(hz矆›hyHeV'|9Y#Xhv:kbHN웯  <c8`}KZN7i>^%ˡW 1LWR"U<5*82ĺ/Th7TFϒ5˕AG#zd0a3ʔV5RvӔJ:~IW1ާ}E5u؛aĖN}bKuJdU#aWd# * :1 Zi-l%Gڡt׎Mi*e@ؼIDIjkY) v}@㞶-ע _eEΗruM* IyBR>D>sVc!yZx[DRs~Cȶ.r@L(9!͗I Tvܟ^Wgd$}&ll>-̲̼Q̘FDK737*B5;P,FUTs}#NؾB?}.C}㳇o*7Kjs=y ]i1^C/V=eN!)Jt, ;q8X8"'-]li=Փ78#]LUomj*.ANW- (&L*;Wx֯ u52]}W3q&z*1zwJ^ p}Uw@Km"u[l}6@VZu \ 1嘅=xd/B@S$k,P;ﯖO,SF96Vjl8 }9u A-'LBIAfFI+0 r&e75%'%њϟ&YZuߥȻ,Fe1.w6]ൡD{i )B.h`$+R5AT }0Ѐ0neT;ԘU=2b %X"D̅ mn2287{z :gS HJ2ɲb#+J61h=Ln FEHװ˴i1!+Ƕ2-ŋaA8Hv{EqEl $N&62K L=T:P잢J Ͱ "`*#qTgU.r&-Z]tV Z0U'q."AbIݖ3 R|3_V4lZ3)bmk2:I \$4o㠘Έ zUK\l:NGN0EYHc^^1t L'DT7,{ AdNf 3 -)B%XtKkJ!'H 2nQU:ܧz@̊I6zvZSHsQiJƯ HC*Ǽ:9RV:FKtP5,vߒUo,6OHaBBoXaMK+snx_db>3Kb\K+Fd2chPB4EjJ;,@KUfƱͰmHaAKTJvTVhTEQ>r/3 cƌ)t d0]Qp[Ïi!}^J4ܽO[k Cn.pDgLQӶWLN1#kf&^/YQR}gLrijoLO2s%)G1eVje(l8db zU<{N(Gߴ'\oFWo\wW6eDZ΄ x aj/7RFT"k;mJU%jfnsk5]ŒMلXُrKrvM7 Ǔs:f%ͯ\]:wB /5c5b,:]A_%eÙF@΍5\4D7!ꨳno|'c?3-x½`A}tVj3ΙaazƐ,9}lw6GEuG ؐ3YM|:'(_Qoh!Ʈz)GNSbCr8<\rbg _9xcAP vo>n n^/XkBX6p͇[rC@noUuU=J"^Uag y"wax 4!ez&7o7kPeç,-Wf٤c?rM*ob}?t29@Nѣ$N<>&=L֞4;qəg`bf{Uk?9 |P|[-13no.nnҞO*_c- griBcr0'2'`8&'{}an/8"_nO_2Z& ;,L@Ar<^ЙVV" V %|}x{;hvo^T뜴,Yy(1F.H;Q&h'Jky EfP\qAP5hqC8tmD ɀʅjosep ژRۊ+'*PZV9X ?78܌Er]R!HJ%-,Z䜣+٨22 $=zZ*rm9R7}Z7o9OG_eW+O*)  `r{ [gr2zhQ)jRM;HpzkG08lo|M]8d(g"4Gcw6jKooyv&d' R&0 #\m DAm@R Lz}ƅ[$T_{q8|x)coaP|ԉQFO'ô N۟Jۗ߳.7s6N+4 `?~~%'Z®Fuܭs6c_ŠƵXbc7 t[6OϥsK -hynኁoNܘjŤP=x)@dSʜD%§KӉFмI=o 6: زDkX">}Vm9ْ4X;ͧ/C{jZF6qݔGv3g*a@^٘dshRd:ūĉoFPdUdT+72&o8h"`y4h%x 7^VhgJXʙ-df{Li]Q_jcuer{h[`QċeE3^X5s\,s ɚ5R)lRncrexw' IfI`;}0_W#֑DRE0H+GuQ+Z1Zjc kCa" Es+>%ʽ ΨPb?*?ʩhM7Fp21/d e]r!KfߑQA;7lI:&i>e RBrTsTK9aI$zɩ3wwǺM]xUZ7:лuc@$ hf8mx~z]΃o!z礤`+v ijw>؜@@3; v8+qS6TXp]s39#Q:ՑF։P,I t|%mЋ"} xz6Oc4 
yr7V4RɅqIE7{(mJQPI?tFrGe8ו|e]?Y}m`1Qqrc(qr{zu3Z}B.>bЖ#F?fgĉFۇ?O9n/0%UGɂ Y:M_6|GnQj1\U)rc$3D5׹]FAlu'& uWi_ߏRъ߽{htB 黴]8e򠓎[Cz_'_m̙Ch'lxϱ+?,`,;C&Q`DآP|AsD<@O\vm@ItX[v2P([\Jy/_|:ۺ&$F_)NRI2x##z)uA)pcblVi [mH5`4~eLJ|ūͳvt[$d+u(NPYkqˉg"\{aRlŵLJ;%UPeO :WxD''T߁|E_uMY=\.g&4!zjա [\ֶ$BhmX:f#)U㥯i#6P7b I }y1+8h& m{̿yҼrxֶ%F qPT@5OCJoyWMM}v1se9Za])ySV3сNK[ʏ>ٺeQd( 2H;k|mqvoTΠƄ*DEuJՊ1fLZhd.V t0qr;V>^'#`V`CEHinzjt}'IqIOJ%h8Y+T~/k.ި B9!Ҿ[ܹ \t9ٛLXWUl#]z<{sQB. jr.cuCɔ}(ZM6ɏQP.Y(bd.bby#;1n&aFbmQqN4|Ƅj'S$k |ny9;6oyP"FVff5&L!;O6[^閗oRJV)Sy\ TnFmrYӃp5 G Q:Q1ie(TJn.⋝\\//Jp4Y=^ϵO+]KGI:yMJ+j5;G'ER"UZx"Ӹ aՒJb?ׯ+ `UF,`' U zP{/1֍"yHp jiy*#50E|J PӥkNPbs :ScWrQǒ&Y:)ZRaK#[˜s#&&'A-1b焠> hTkA%9j;+3`cL"͋[ ,#@dQCCH$%QRBL5 X;-p^Iqm|焚;m8AhJ5O!N4zǮ/IUY7撇<=:H-@fH|85P^GD&–D@[b6 :aɘù.x%!sK;!q1:ew=nesL\l#&nF3'i]S4x=OUb! S*A(z DcMF+[""6w#q12ThXq}bbf[\En.TFWnp9 hX͙/ -֦$?q3Cg%j.Ia2BsVJ%,k̍ t1J:nq{W_2 j=O zܞ[~;Ǖ%K_:b}BTE5Ȓj(î.s?(5E-*< <؏ Gm#'4{`uIOAe r%܍ibs=KRׁ}zOu6Ij]2ka6TvFtYeǚ2aD0M^!I>y>N5 GЊrhgRʨ[B _K[}s\8%]o7.G]T\zEK_w}a&kb-@9p9B8C'ú#ZKdTq x(S!]g/q$=Tf'c0)7:3kHꥮ2 렣: D6K"B"Seƺng#e:Hy7 4 i>I:OS 0%"]v0=T*{o!Iʨ~O{I@v0p1oDr/y2A nZVZ\u73DPx %Q^//W!~k.&$a,37þ|/Pp:ѧ'c82 ]2N,vWKrGc2/}|]zv-&^3y9\sCC:[Z4oݪc_l^ӝ/'GgqNc>?$| <{ Ѯ$ Bxs  $Q cj=֮'uCuU\&L4G7Wd\Zn'>KYl0s6gs7OB(SI&X?e:MF=| o!T9蘯W;G o "w;,LE,sG wYʥ0H'q%<^BEp,lYWy¨fh[XB $^!#7䢇iM z8\w{Duٌ|'crP6G݌|'ݻZ~ ^<^V ho>=^X;~g!kq{z0+j=]YC *) "Z; ԡIJW-z*wWcçO?~d*x7-'߅Ե'}. |H,O:\7Pl"bYvk3*k6+Bd^-YǵMha@GiG(ьL|uW_`ىw`Ϩu#(tCr%ք<JX5<Z0Oo0ʇSOۖ)jexLj^V<|(-[@#@C(lC^ AG72O.6۽>jzY6J?Db汏f뱞×.̀Ǝh4G7_d\ZǦw6ݣ9=\Ou^Бh h޹Ae$B&3˯֜J F;Q?UV,o?L?CwV"3a.Ef)3H=J&j+G UZtcSv}rk,7}M8; 9zllȌ FglF?dD/1c3^6dtQJ%O],' p p;)ۏ{ʔM`FȘNu68ƌy[$>]/7dċl^dK,2FS8{CW s{w{Na'78@h%K.SVG'Չg2T3lO4T8ʠxƭAHqpƗ[i&LZ*}Sc }dwTw}ԆF{^T䒙ID늨94ZZ(ɌP/Pe9ӪT}>teQKZͩD%x-+(ӥE g)QQ8_E Aumsi*ul|MZtr2m͸C.Dc !,/Ri"4SsB=lʿ:u͝BCBATw9.) O+{Xz^%%>q5\S˳v7~Z0D%6r덿y!AϨ&v}1d%E,TjXĠKŕ81ڛ2a]qcoˮN]6]QQecW%npjإLk9* ~u հbjrd6ZqEhE @1^ꝞV< ȃfװHA=TLZ!Nv{K x hlDwsc*Zm\6's{$| Uj,N* Tjɸ,lN*5Zm,{8=&=Ĉ+`;/)m.-Pbs0_x\Lnp=R}&dRUvW9ņ8Q j r<01sz Ŭݎ6O[f!:6ԕ̀/A5V9NKս,՞YR.<)җ*/q\{ bbjX\~='hr6yYlQ0[v^^)Pg\|2_N㥰Lp 4N9Ϊⱗg4Mn}A4F,ԹˍyGtŸȫsߎ3Z~wWO>; mO"\`վ rխSU'@W{݆#sψ^/ZESN>x}ݑp7ݵpnr{@`;pfɩax{Yf785ț[L+vՈn5ɐYeV0\xKEЗʁRig3XWHqtt&b$)#QhYAz<#3(jED;p撀LhAyA0mtȶ #PPBXhA7 ԿP3mkg=vWOFGLGrO vtИZ'nZڲ0&Z{3˼ʼɽ)h|e!mR*/A "QL:PX spr̵OPA`t k֢:w,?Z.Q=c3S!m4~ucz8_ieWa@ [0kzc)qCk,oG5"X'->P*2#"###,M(#\o_eE-VWѵ=f+]K9F d]cB0cGOTdA nR"FE xE 4HE#7! *V{x\ O4'r#dn38fPs).!L9y )$ɹDsƼϜے)PmV"BfJj (vM#^%!Kp`d&/~?{% R S_Kje+uZ517K[8T|k%q@Wl"孈A-YAuǜt6:UDʖϦlцoj&h~| ]9+/ ʞ(Iq,jpՔ]\hazEhit+\hgRM9S7Y(2jMY(!Tuܐ M@U.^IVesT~r``u/*tJ-` Sd hSj{@cB% صP/ 0J_w&{fyW7E n6kAюqƙjM/vR*Y : j9$naO\O8>.Wm2tƆDJBPpX2N1bvS:^޼޾_}B0!˫5C[J8c@5?Z~X$МQy@pQ. #E<'M$ Aw 1 ][bځ@J1):Zƒi-R L"*OxtjL)Sm&KC֍[Ϻڛ, ^Z"\_˳;v!4iB<[ZϾӑɁ*SV5cFS \ NV-U "_#x95'"`!&JƔjGAj!hrrGr!9 7:4 KD@uJDh N`б U{P5U1l1y8Sy(cE`֢H{@ص#׀z:l0U3ݫCl\6nvuHUpR !͛g!DW }^OmPU;[z1ᇖl"O1՜wVªykD RzY2?Px]g^}57p7Sq8ؾ5"MݼxA=wT+/Sb'!EK$.~ kXDA(U ˗8qӦ@џ:+L|Z =G?(U$kSf|ߢ+շNYCG^ 4%d >šoLrN,An~_o?ˋ=z_X]m28h@9(3`ZB6%dV˚6\ՆڕWydUvWURƉ迏B۞z亥\0NNpiN6RHvӃ3_Ը_m-GC=calJv:abix4'LkX\xJt^yаWh1ms+xfFtO7ywu&oPx\2-?QVG=@dcIMp_]| =|[ǡo3MLJ͎D^>4z$%%?5hɗqпDL2"gIuO\ #yk]?n6;՗P$C_$_]Se%H~ gu%"z+Nw wϗDcAOqIB\J`a8.&@NJ%kq} އV5>h*(j}8V 3aźFݦjx{=woo[\m>S!q 22=s%d)2l<),pTi! 
Mkf"+1q%j!wj%݊:8X%{QO+^ \Ƚ+@n]K>\P0nyM.W]ˁ@< N:cQPIoTc%p^qIp9h(NXB'=IhRkn_7S|_7xWwWO/ x|UڰJ]9 n݆eV ydF=Xk %:A} "PzsRLS$^6=U;\ TveM`}<OjvA w.<:Ƥ!?UsSC8Fww B{6̗k厦kc)# =Y(: !A=IOUG_x5׻huF_>}󈏷m'D ҿem`K'$f7]Lzp6qgT:f =TL}-[ Nh݄6ƈ)Ѷ;R#2zJ>Gj{ybp^>=8OS=1q_OOGM ͥۤXo;P*Y܋)?}#߽1ڀ` 8/s7y5!.>O'"rPe!RS%cy`Ga~ջ7 T{A뫕ak[ޠO?(_Y~cl$3\O'v7[jmF/Z`sQ)J) he\x[k%k plWGb!+i$W@=ndw+Gkb^1Ѽz=W k(}rqPFd FNlcq<:vw c|$GyOόx-47åEN<=8>-J :5`{5v/zbpYo( /^^߿CY=]lroq|̕{S}uͷQ+*]ak/]`FQY``$LB󔀵Vj7j|q_F~ %O7뻔W|5YnEn=vg{U[VnzRMzV\3D_&)2:@ T^P.#Qc#$aD ,bvH5$e# h@@Dp )I*1S`%~JZ8~\s-5?uGqQO9VeYitq ͡9o+a3gJ)9.F^#`Cbn|uBC5CSwO7)VC]&#` B^' EHAL&y p>t'L9FXZ…rY[@rE^T8]8"qo]W׬Cc .8TgJɹPѶ`,2BhN̛c]2K>:@ SJ-%ͭQ.Y87Iٱ9*0Lk%"SP UD ˢ*"o{BS#[ iƫxd9O*b{()`p_%'sL)N)ADSn f:;d‡=0-uwA_9v&rp*r0vv:x. 4M woA0eqD"d8(:!D%Bpj A6`[rS!ٹQ'\Z*Z/d* c(i6뤩`LKznq)Њ('"A"LFܿ9Cx18KL2*D;L]&ej]ڟ1a9uyMq!уK2PAJL-C `^_=y4/b_n~{˯[gt(eRL69:].U(ISR1换1&O8Z!e"F[:)b)9YI c"ҨDHH4`ˋOzp=Wi~3_~J:+ ::uw6w(p!L 9\Ufv}!&$u+Siʓ]oFWۣa139n n͗ ~ѣĖmIG_5eٴLIM1 -]]zuwu"\gKܯt R'QW!XXH|r-4n!x`\Xo1D(@_BŀQ^Ad F`)1d ܠ4%V{D!hP "Xt^9 1"oh_‘i0; 3$!1w'B250#4Z8tnA4|v/ސq\"Hbe^*<!c9J<PđP4a0ѻ{f1%֘H'+D %,iT DBSԼ# <L>$@1,{[kx[,_a¬pڢ'㖀 T3A8Dx^>gk0L)XYX6J 66?( Oh2alL6܆̣ )48\e޸:J>zVg>/8?ʱ{x=yn+ ZXy(>yz`7""C8{!ƣ"go_wXs;q%0=zWXo1L0KXp>Xq! !\>9U,a{lzFAod>ƒHQf>5{^ 3'`vxҺIF޽w{Jx6sQ BƯ<EgAnref 3Z \,k0؄=EE#Fh\G.rO|ˑY,+0l9i!7R_ Ra<$ zGxݔJݝOtjk.[&m 9H+xe?|}5jԮ[I3H"),vˍ ׯsݪ9Wnwʔe Rۇ ~KH)lH8o7 Ș!&soo?6Qoyɡuj4N,8嫼%* 0o/[ E]QK^X}D }IˎLpu:eb{ֆܰ@uȨ$c:{¥6lZK12?^?̀eGwH%|_ ʃ?%PJo DiÈP0W))@@bE;;5eGkC+xA*ձF [ŕYf3BǛJ싰6Xm+XO+v2pҙZ0cnoERwߺCWŻ!rwV>F=G4}0e n&Iq+ 0hk[W 4h#v??|KBfJ(ҙmYw6_{hȾ]A7 ǃ"&iGsC$!ƈ!1[/ݩA#"=H-&0KD߰3οEz=kzC" ֬풝g1nqF4C T QoN[N?&\״!IECHN׍{U87T-(́=($Masޚc8QK9eyt1XovVUhUY Dz5VCOqV;Buo;|' qUU b FdͪȊR$qz %#OpAXMgmCQ'3hW 7|D}~F3)yS~,a,II B'0Z-c$˝̉M.pKm84~P|~]O*z?;n;b4p-1G`J~.?K44 i?"ENjGV*szU[\rMb5!פrUX m V qNrR.X {bANe<(!cIV5R, .Ǿ "|F2T&$^R/8/Œ2XI[+f +H+7i΂΍dn:P#n:|$E/^*0@ nq>>]ť'52[a3n#ػ AͽBRF`%M0RSF6 0R`-II]&9 %q3PGx\k°=E\ 9"g!`zʷ ֭t2HPd@2(K”Ɩi['Ja8,!#!&X&wt2[_s55:eZ4fb#QTuϋ6\}4 U*g%Dke@&(L|7AɹJ?%c)񚛠NJJHŲXAAyjآl<<͈ E  $u]ycK5A K)6m QWTO Ƌ (ixAJ REG+,(u?Si}ViU;E(BDrsbgfL K(2!@]37uhl)N3cHe T>(b`tuRzW Z.@Ft !ʵV^J|x)L DcF+ߕ%r0 ?o*z:5"%sS$tB{ fa+:2:Ri5ꗘ;ѱ}:=CF7{V!~AF{ j*Qf:]Ax n[ {>'F+.=fM*(~Voaë^SsyO6t j^ybJ[;GCUnpV}P=g}X[8f׾2wޅOS~2ˏk+޹nCꧏnūeҮ)Wk|2pm-WpgusƳk09F&N}TEd"xvD4s۸Y\ؘKoܤkVK$[ݚ,ek靜ߠO!%q k&ϬT\*%X5| =K Wk~z̀RiK3 楩4n}^%رL#tM{lH6#˧{&{$ƌ~晰dP22#4F'xW>H@MCt R.kl"`e{F7yb[ܺ?hA[>h!h;)ɷ,s| h^iD̃Xw5@Q=gںaȖf޾CD˽{'|=`~qķ>lrD9 ;xV)ͣx4=!"tiKِT{ ^ EH8 (Q`QJ R"](4vCʐyƘFfJ:2mcCY͒KȺz>p@M3=K?ZJ=?.*^\\}iIcn`bn_k! Mh]иE6t#:蔗hS@]Q4c>_d|M {RagY;2Bj7bwN8h#2Sbo! 
Š̈́HaqKA~Q~4 'w˚H1~5чӬ檋oMP ɂ %ɭeDwmK+gb|5/bn ARg*cȃ1f<sif:ݏ,,ٽ1*^JQx[t|Lf]}L{1+lƤ昀Ёp8Y׈T;$)#TB^`Ži&dAIҔPOVԓX$*H NTUbV= 9E"Ms; n{s^%-cdqp[Wgp*y/&9uFTK&{p{˶C0V>t;zp 엊uV0\1t ş٨&<]13O B&lsJ~v4^1:Eq^[>.7N-G3첥PC?|H}̒X7sRLߡL+,u};evb@1_ ɬʙːq6S^,=k>dS3JHV>v-$/Ȳ)+ڱT܄dNCbfsaMR-c|uR^ZJu۵.?vn'XcTeZ'ެ[޺w!a=Wg )#cml8X6, CTv$Lvc}7U7 ; =qiK!Tm=hڶ$‰rS&c1Ww,ZC[B{W=c{aKz(hqv^'blr(n8tbnw 63&cXpo4[:n&󕙙PHB6)h ̨8Oa}כ ȿFNj˩ f6,!Zn$׍:'Q$6Fr72: ycdp#8̐{!>&J"s- ȉB!1ң_|>|z~>)ϧ3?]@l|tףK[b̗w~ŘPtq4?X1嘢3Ъ1 FY, -J*Yi9#e;ojT| d&* hO_g+g&{eEXf'5;t)tw~yXfeKqQ;iznz&/P=|j hFs^2Iz|+v2͢#iX%d'A=2)sHَ3CrxI=CaÞ>ꮮp!* %m/[:θw|,i9̵Hȏ!D`!ocJvHs\*zY3-puq^|d5NCY產}Զ >SmjK/giזR`U3VHX@k$BP{Q$@C7ծ?S\E% reU2"p zSgt"*d `zwb3y{ ;`m IT%8g(AEϟT:ړcoO:m^зyy{DC4cuH=Ό'8 | 3jOm=Y9 rW  5 i }{o]~.HC WHʐk@QKx(b*) QbMwD6T673fWR0YZ+ 5`c'U5%ߎI|HpPHL∘/% 8(Rj+%X ry"*$ e2@jRMAB:}q) @@ &rk޾RR7_qhNP)D 9A@g;,G 3: E<]XruLψE10eBD ;x)W4e&ݿ:vOFwQ1쥀SWY% nbB 0 -(wBjPŤXz&@%$yV*L"Pw1Cv4夣"QhP,,T\ET#KňsFbh>e9JBY% >73m]!$G{yP[@RV8}âr\+o, AuN+ב 1 } f(B20*MPJ|e^9&0EB`CГ6DaGوY{ ށ24j遬731D4VOܟGOBri\̎΢ֹ[9߷%`R`ǡb]AVO,r [+=hP5os:97>pռ>q2tszG 9y+~QAT( u`dqNĭ֛hm] g7ѲTG׃o8%(*  d6(ڟN<Y'Nd7.S&e[,eZD}{q9tǜP!sr~6+'Uڪw[2"i &4SfI~ŝo ^7Ћ?d;3خg7;bۭۓbJi106LLKJ8nLS)@/GZ\Ф֦!;9i^$-l+͕$B , 3m*`2|ϟ IX1F0Q'<3\y} 8BC,[ˌbVJi;ub+ k?,T;qr`K$ZsmHknt6vF75ҷF6Hc8j )~f {J#SxgfbFKv]c28%*Z'J.*I3KsH`{YP;)@45:|+v=Cm^lm0?H\f,L5Bm fĩ*jb.qܬpYFQ.Ł-!Εp{K>>_[ TTSQtmWKz  39wxp1Mj=75k[= R3z+~Oq߃ld&|y_޺9,]zy<(Fs;<8yd SMaʬ+C;yҥ':ߗfoaԓDhe~To -6n4GsE7gR[p0)T79c{6xQZ9F8CGC Cw\=py={V|cds{Wx&38 7{P67on&u"6x(3Z{blH=y+-ݸHލ PaCb]'Xf]g%.8BMcR#Z6hKPmSM?@jVj @۫M)H?,g9.u w+^rIEF;16>Dq]8qy=[@Cܛp$>D!<]ÏBĉwq0ἹTAn~T7e/Sn22:>h͑p0 ݮde$t*|$dR5c}+y;,-|ލ8)~LѴts{7!ܠfMJ7lF ?ӳ~c 78 ?BB3B^+$W>5kg䗽kWMXk^b4 KslJI UrH#q/$W :Y7 TGEK 2&BL%h5*'(gC% | SIPTaB2 Y+w]} IAtQSV.=2RL"4aVu*8z n~%8Ű%wTX'6j}ŲG2 mNڹ\x61Cg@pzw@!Y;Nhh!8TA^,[>uvnTbw Ś;HJ7NݨEZ?& K}"(pP\jGB8`ջVvtx$5rqݵqޚFZ|sݘ4 蘇ԆHIfcA0\̗%)Na"]z!l/"du(N,$;^̆H@P EV\$IB 0`8m\[qI&yy#eHG *),sЫ`t{?^Mnu<'q _?C/ ѯG `;O)7yKˋW]^{QI?s7 :^;7w^>Wݨ㯣k&{Ol01Vp1L] b}[0x~:vɃN%hE-f3!e0\_S,v!iZ׺ۙ OwM'c~=rdv͢9k7E>d_"LG K?x*ov& L3;OKt;s{<Zd1_*N?; ,xUl]:v4(M.vF. ;3~b J·xk~*i57Oc誉`4]z߭=x8<0L m{x Ftyj2f1a`4ryg^?00O4]?A~Tǟ ϗ'Ugw NQ8#&rv%?XEw.g&fх]D>^MnǓէo@^ c~y}, Om"A ouJHKx/Zy 5.+'K-%URJFib|lD%ȦKhY lQ+8od ikUDqk`Je?8%>Z"7)"$@QdA;(Jbj'jVy`a㜇 36!Cd<&`(5ُWa@>S"ŭk|w,dA fn߶YϸߥP \:>ڿ󱗴~עoRφ],:'3bI!~: yp0 ?[H?L&l5Hbsi5PsbvHqae ?Ʒ_CLE!yCГ 6J#mG%0+L"KCF B1:THŭw q$[?U~ uܤYݱcyyYje wq4={{B"SMi 7ߑ]"ZHL 7I$UN8gx>λ c)5aB;F]&PGB3HZ&Ap7JR8IUogbp^T!A]fT3ׅvw׹B|?_n\/K9_n ע>7 ;5VA#tH/8Wb+QJgwtgN]nT^+R7E RqBncTL\C)ݲ3mxԒIJ_xhT&M,uPhվ _#+wȇ -/D$-+Y8hrzq#Fx~.8Ѭ.VWv#&t%* 𲌪襺|`H!X:}n464f':emJ aP>|8=y\4{Ҧ2l:/. =5/UmBٛJfeY)'Yk- M*'2|iܯΓ%*Sv#>TpD?(|\t䃖UkWUSbuZWW$!8{$ r[|'G7Ut%p!?GF!82[iWV厙re9qOYjy,( iHY ff4R"KlD-sOYQms6;QD%ɬٖuߵt6Fߚu\Kpr1Ym|/-jzWuF1:p9; 3*E8m pȨ;cz村tbBbpcIɏUi Z>}O=t^*T)=nĞf16eZvѦ@AxF7BVѝUU 煈9iB[%^ ;j+SD 牙myE"G.[VJHߝuӇ0t>*sdIdZM[XZ~2 ݉W~=<6 "~$^It*Fg,Z(W)@B.(ox*\sn2=u{?FVh>*ZOhZa./h]%50RP!XUsk ʝ$XV=t};u]Kbj2+Ň]>u;?{ 4[|to qGe C ۥ38]jLt9u6J:+Sc觬mAسok竧PK=;<P2&mXz2z\(6TX\܍|nS-dWҽ;٠o%QDDwֈdbqSn&98nCNA 's{f3sJ2YEo@A/!iFZ,a%v~Θh&Ӑ zqa]>kخ!ٽ-W~ l'shHc'mC:ҝUѐN`?{8C6z !jIVLpjիq'?f븭&@rXo432\"fJ7_Xnj =P^ul-Uv2$(U̐8_dBSC|G,$We,h&;'07.w/\+1U:Ҫ)^ BvVeq pܓh +KK4nqDT[ J5tmvjeq@T`x"":o܉t7>xG&xiEl^l({`mD5ezr{Y]0d <L)FjsR ڙ.f']d1̏m <[a(mUJt$P\#ihdnU|_X^ډ.yw9_g WWbP<]rlBA-(+GD U+(k]n=,@lI>CAC=i]AOB̑gOJَ6;8I'[k&3[\+]&!Ίrv]I-uP:A$ *G T+AŇ?2s&B)^*B vPpKOZү$DVw:? 
Wp^="{^$>wHB/, # UGXmK)nʉEof1uJkl,]AD3H+Kw)]ކN.I؆!\26  ܫu|)7RkP޲, qpMJS ґBw[ XIvAYAwk;XAΌ[끁jj:);)Qصc {2K:ρ}":Q-;QېZ21ӸvMeg/DS X4Tk{rZ}ͨ>4~-;>RWVUqWV&Kg i/v10Pso )s º7Iig_Yw'mQ cvΓمhvq2r_dK&)ځD*5$z޲lg߶$uqz=gp~M[}{N'uiuy'!"C5i<ȼQ q.zBYz)?I{-?Fui&# =k0ڀyTU"ڳ7n&)Eb 4B>Hᛰi,RJ9,%. Zb 0pT#O"H1_,:9 @I7KN2o>'\ m#4ݲWtL6D񟌜 ZgεtRBwn@GæQ0Dq%Svc> h}%kV % {Z{~=5IH.U&EQIxG?bnkpJA@A~Ñ؈ vvqYDž:3[Y[m?x, ]yl:[jwZ3d0τf%4,˫)bI? q H…2(YJGD-%&Ue& A;ٹŊY$mk6៍8'Y8:? or "r.h5ER1_rb<Ƈ/U V)шy(P<78śPRrYBc`QU`Q"UuMdWQk6U@SYeY*\T"2^PfeY 5sR`1nMئmU"ٻ޶W}{ښ/u$hӢhcI.-%Eg%ZDISH.gfggv3 Ss#c}K#tFE0AN3=xL%D!p p`C1h jGY C#/ZnɌv&gu.].]1^FE1 G΢=˾*o[V3Atx}uv{Kɑ6~JN^o$#N6t_[xInw'M0(n7]5*? ?{&e zLFJݍ룟1-8a 0ҰqC5"p݆0S¯5|>g֠_1"!X 7kM5UF]ޟt d`K6m˲#ȺFiTem:_>ܔw6Ka 9Ru*^ʢ7Zz'*Ozlg{+VuZK԰Ġ4Ԍ+ l_nLEUX"C+3m*׺fx9xdlsY>TK~ rl5^zt,#8޴TiC581=Wsduua^ߺݾ>u%e~鮯]3ͶεyyYL/oY{w$ ܯgFC}^r-iU0v W싂=Y~. Wā=7z&0B ̖WjCɵyS~wU+8DoFԹ4RiLr&go&go& Z跰zZQiI8h (@f%c:nPc")Oč!\s0ju$$$|;Q<`t79+PId]*xsR`2e'Q1-\teӒt!^!sҵƥ᠓v>oaJ)3lwcz]J%w* VaRm>j2ሓ)CX".֘X]j%XG)0T|tqBZnvu&d4Ukx貯u)i@$&>JeysB#4r}0 FbGA*G)O+"ҡq(`4pKS >ؤADR{ď`JB򌡾A`BRiXI@ .WإE5 wV|(6H"߄81VXCA}8QV/I8JH=j:Sz$MgQsJHJ68j\4N܂l nʖfGhؿQ)v3` a`JQ ~lȰg"x󭯨+?PS.  eqG kMO$I_),ye6N4X<3caB^0^`<fݤZ&5En)f,Ih.䐸/ӣaځ5&: +W$FuXk!|MVV QH~b0Ֆ_nP/)-L++l'޼wXO9-f{~/ $bԤ;J$U龭i*) W4Q:7R+Z [ˍP|}$fn=-nJ?eS_ :UYΫȨD54r@4U!18ƀk(‘gB@ |ØAW0+NHeS \}.Łee GV<H$v WyAyH)(XQiJ2hT\sW}C F. f^l-ZoASO|&4V- )$x߂J= +yj&+n`Vyb/*?y!ڗ'f;ړ5o2Q׎C\y]/\ t"70ͯF tSF5Q$BKJK?SLCҫ;8ʹ5M: WC&^yIP!Mx,OEyh!ไ=GϔT v0  }mQ𷢘e&0)skա2֢⠙<FFZ Z,yOyejj$0'8En2AbeR Nc_SAQZ-"N8D SfDsflJqM8H2k=NLp(ϳnY/# ד~^R г(=kݠBjJ79q#2q7wr|3w4+ļɸ;:j: C[` 't``j`B-L!#.;8>)J)Z++@!4wt?kؗkؗyB/|o51{Wþj%{X2Ƚ=0|!Ja{ OyT7W ijBm'b UcJ?|.E΋do-ER\(X2}Krf WaGzVu1ꊕGm-3oCkEW$}Mp?nx/S|fRNa&;VNBd_gZajHvVI±2w]}eS+n@VhjgպV pj^Lm>}ep^xW($`8'HR2~r@T EV(" _͕l6*u@Ҵ%0]rRdZl[kPBug+eQP|j  X'!D*C AȂbm$X;@Rliٚ5WY2+wyj. \=ih 9{d΀vToBCsHr]RtpMyuT L`i:άI 3̫d,,P6T0 <#9 I.07xeӛ @F~ɽtc-W7d|6qڿiɏLyףUz-$Ł^.L#7 b4z"1l}9#3qs7ʕASDef$)?Ӻ']kz@x0V !9jo̰ݍv v ?<#hq"SNE؏أ~c]A1ѰQ3JohN51H FcӅ-%æz#as `>)"!x"0!rôXͥ0"¤} $8#:r%i@R{qY\%`G5ڭ?JFnO4 fͅ2p2n Q9΅‚(SG odI0*{`K0>XaHB*npĭ /6H"߄8: q(T$#`b.rU! Hon-)3gjx[ .f lW_"q_^+ժӟĈL' p?C'Vm5NL/C0!WS #5ǃZ \x=nލ#˃vSޠ vsߓ*CjLdg))G`u_ x!4pS=z`˧a2,WSw88_m?}t>ÿ'`M~w22>~=0;ퟃ 0o?i;{򗳳oplx;݃3KwO<~_oLKzc?aKU|^ʼn#5ΟtM|{{>=ͽ'[L?Gt ߊ?mgПo~0gtS1v~8ae2sD S"MDj!*Z=W}#ia Tc?&y:mowc܆p)AM2̤ۧ3W6؃홫0I{stG;6!-ܮ}yzvbOO#| х^ǽMC3i\RL;sߍ/} &tTţ4Y#pNq7Š~(};A8x>^=97 W608}_$ݸw$r$;{O)CnAr~u7nytQχ =5џq/N@wA ?,8_/pҩ#ܯc;\r?;^xP$ sg|: n.HkY. ^7썮/'?A dHXãDy(f{LJLZb$Na` qEjGQjf~kpSYϗ-Ʉ(TݮH2(G, @gd %G"&P bdOg#]d5T~>v .dmλwV5mxq1'ۂsk:ع?N:Bꎔ.Tsr :(*Ž ;_:-qhWXHh':=WH8{gaӽ_^^>;}ksJڤm/N/'W9;8eś<8yqM] )>./^~z ~\ȇT-k^)H8W6wNR"/-N@i)ޕ5#鿢COD ?xmGO?LmspwbHOM j|0m!#9 _%==Xe#_c*pn R "T*Htl0Za޽u11M `nݶ2U(El9hn,_׽By{ZWdW/uY^kȨ^KYjgJL_:?ưlr C&EY9u.>];;MpZ.ms:M#%.m4BpFU>Q~,LҝCjK 1q\@Att-`w"=v*犃[>25ʡzMr5^IFGxE&[Bi{i?^QLIa@% mH`BIѻt1jL2#hbdb_q"dBFSǗq^3H$7e܇iNJ$2MX!h;W1*ZOYRm)H1)NQytUGTgOo $ T- G[$~ (}+C߻] MQ5}u3܏;{g ¬lY9;o]}}6:uڮ|x?v%GQ,dLb̕8oMrGZ$]pQ\Fl,œُ<7#*ЕGAF{ zӘby&HH޸%Z2*_͇ԭ\L"wJl(}qwQF.h|u::|,PGGqG̋!rVx8T88!hqm^jS:>=/xu2 &P2ev8 "hCu#SWd}#3? {pC+,={A/>M w09^9 7η ^oQV3rn"`SNŽp޹Yd "txJGtN$A~Yy#o(Aoqw .*vŢD&Ͱu{,|m>]Ӟ_̨J#9'_=ꯍuE2A]NDEr32t.*{,i}^xF!.(崎ΔIt$ow{{]\zc|:t{7iahdVa\Jz=!9-o?S?ȩK[[ u4u/œ!֡^ddK^@q~ߟ瓈_7ܓD~8ǹm߽/0B-/eJ >~|>`$y;}V A.PRHƻ^pga%0`Ѿ{]VV)>A;s휓.N|E{SޘyO7k **xRؼXX1l-~K/<ÐSRIk75LtL:3>R<Wm}MeGiGfIl@ǡ v FqbkAvf0˫ai;VkVEx9&yA|̛Im^^֟2Y=Rt(ۋ}{ξzN0iC@>.C.0mO/;Gcjlu@Kɍ́s4FrAN2Ä#aDŽzC͐33\8. 
zzzfWb2,Q*`{=0ɛխ;XhH .S*oLpZiHQpjHg>L`$5^9Q8^n> S |Wl2)^Tjsd笿 hG,uņ9YS׿>*=Q>O Cxs;ܸ }nk/?Mc4өuykWטݝ&62ig #  z,Qhb*#$Sb '?葅i}cRI<_rԟGvbdb.Ä!!&ܱRH2#id$1P)L1P` a^bn _i2 䲊.ޏٝ!~Y'y`J aJ:F:6# ͨj&{+|+@4#GS٭-%VH7R5ON"}6]C&zV>拈VI7u>vgyק+@"&whXoƻm)'oF:[`lipd ӯ4\$^dm}n{Հ]>EK Ĝ#G0ɕ P Xb\BmX 3HABa lhRl2_yc"APD7 @c-#5+NH9(r!>wOC[%/x8Ԭ'K7r>|Ѣ9S.׋۩p&[\O!ڭB/Nrj]b0Ң=Z7Q"NjCq,b4dk5"p)It4A%$5xE:e6!gD֢%KDU"I8TQ]5LI*U,Ü $21efD[?,82XA|ܲHX{J\J['PJ-T'5в rh˷I|$ρg\ '\/f+{!_, :'wGx[IR%9 4-΅U#eSOFp xܜ`WpTiL<5Ь歨G xobd>}~5ۂK|' !Е@t3qazxKLii콒pǼ'#wABNB-QıP\R "2d|J,ksNKaϻxturmxMCrs<_#ޮ+󩔃d.x<;ϳ?&?c,I{%cwDi0+])L9qMC×%z~/ eZ GC/Hq/+Crwΐs]ސ[A͆ ")X#)BMe$5HQ`_ ibq*2+>0qU^nPA/H|@:iJ3^4"}`CӺø;1A@e+;sCeJ «_cCö2EgE/ d1_gq!6#a!ʊ_':dppCAP%u 98]CnVMT8J?2»_~9pS";腲lUtvkȲa'z^5ģj3]|g:M)~ܨYӰ|Y#%drbٞ4%ʔ%\7do{g}#mt_7}sGj[VANFFǣ;ζ6Ǒx W?"$b1V `o2*)Wo6v)bRd`fq)@At <`$afr~s;~s综9*Q0B1a1%bDDso{{.>ϟ\4*ΥZ޹T;jyOlSʤ G %P#2cG$AHqFE/^OX|XؾwH'ffG;+JMK/JLkr^OXbZlр j= m<Y(x-rt tp J "Ip4F܂:zŤs+=`0+2n34bLkVpɆp}J[]M?_^?:\-~ju C--9I iC<Su?NR&KPU+38fiO(ֵIA vBi*fJoqXKD0[ר2yTsD Sz6X38$MdLg1{Xj$w:K&-,Mc#^g 5J.ZⶥLKciF69td/d*S#YJZb} qi~ -'e(z(o'ӵ2mx@'үhc))`+7nKo_ڟ g"sv}X|$~3uKř쑽tLf&AwNt޿o6Vpe'rz/hԕ¤.lZNr|6i2mZ=5sep>)|Rå A3flbG/Isgbi/ (!|gμa&ջ9\/yslO9MAsç>~r-nThnPS AL,@M4 _|O r;Nr-=]ʵtw-> 27fBxHj!?i=` Aa{)f ̕%yw=t['?>`XUCcn>frI=Z#5)^^ٻ˛MF 2i:zjGGdQyxqW⮁,02?8_}.;گqth F4X/s\үF9!Ҙ+J>3/{q);-mm>(m#nGU(d9`S<^W#Q!1&3N9 jDSPX2(cry)Kʛu Qf$‰sm y.@쒳,CsD cRZ]P:KQ1eDNTFJ_mPnzz3O>G{6/bie]ҵw:'gu{ǂ`(ޔպQbbd+=p8(qƇ-CIڣyMDX/UЌ@XDxU,N)nT?% X,B)|r(0DW) sThh4:-,dP*@F :U4SbV(#EXη'n5FbZӰO __.4]/? |- PO}zr t?s o6*>qL1˫z8=we=z xt!̫y0nOO*(庂hI1nRLX05V GޑQ(^-ˈ/c:&QW) 0eB/pv-o踳ix+ZO/] !],IBdAPb9-o;(͕.Bj YqZ zJ_Ŕ~;3`qQa E(}F7Q{U<0ԉT<`Y[?-~X%F' B m("5JΙ(U}yIT:^{_Jta:ݶY+1kwc:W3}YsiPi]Q:6P8AfY3v{YةO?;H \kn!ҔQ򋎒RF е[<<ŝQf /˚D =XCapڐ D+L,mKW`~|/J5(K*H(_":KdX߀_R| w{̞'K6bXb+y53Gcl!DV.R$Hc0uk^G̗;WY Q#5';> b"ɞ.s-Giη+6]֕6K q[["u'A>IL i\׻?h N*%4]W#Ig?-?» EQrHO܊$|ENp#9ɖ0Ҝ#;nN˧GWd};ѨYbenTbUW}Zߍ^ZhpnIm}o8 DBITxݘJﮅ(#{ 5NoI ͻs<7(<퐅AQ02`%A{8SâGai0LЌ%UB ͯ; qjJsпߤ ,}zizv(Z0ʞ7R1ߥmwi]|W\pL 0V@*Jb-(z3fu\@#_L-xϤ^L.4'pa? r❓Py۰J3c4ir1 2'8R#&(*ch%#/#Zë{סs\WʈRvL54ܞlm5Q)d%QB'Y 9Z W9;^T:@SC!BNEǻ⺕T묕BA@uJFŇ< ^T )t (_K+abPS/-`!h7We'R\ShP^:!-R^*QR)0+G0"T,Ԗ.8ޢ,PH, ԩQ_~s+I B!7tP\?OR 鷓S }]Їa"xڨu؂ 1T  GBʲdS@&Ȥ^hVTHݯ_i~,<8(%i0~c)M@xYܫ.5pv~SJ4䙣ԅ+~F }2=2?g7r_"]KU@-C h{| $?!,kS֓Vӣ˄9j?E) GOk*B:,ˡ5="gyך%&r:!z!iiE >yf-]bQCsurx窅 ڝ8UKvH;Ԩh~<ޘpH -1RUٯZI\)JcGi'ڤGɹ"5C:1Rm3&wҖ{r/z?rфY}pD{0H͈+3s%[^`6aK쟩 jFY-. )جN91SE!|PZj k$iL@0΋by ˯dF}y;n697{;I;zډ-Ԑb$BBARЀ>⻾E&{E`:FW#՛䋉W{w.]дL2gÊW-|_ǐY]VbRpfgcmtaBwmmr,9 H+lE)̬\(+O^a, \|+XАv#,dgkd+FpA(|yɽ[`]č )!^4UNs@"c;e_7 `H1uqJz~gkjw83F١C]]xf_\u{V֐]L%{\\3~ͮs#0o1_Dẳ.f.ו&wwI{0eZwynw, kn/q-_KP5ul7j2X.:A|ډǞUڛ ص)r5 \!jb7Gk=5$m93ĕ)U S+Ҕ&RД8kK,s8!g%Qd+zDIu8ӧKlQ< .F%t{;_cY&2V&_b 3T;+ u8`B -%v\\2|t->V-/N3\N.t" rd KW/aKpRL>s\jbs1D&ϐ`I{vFg2Bm9{Kp%"]" Th.Xig\KOUiܠS ۆ5S>V_ƽ//'ܠ^&~POtQ3f|_w@Ï')MRSf Lr;NQq(}"#I Nߑwo=0^? 
qOjT?d:[yf8~~3F}wK~F' h>˷{Č+`_DއǢJMYb$sJ=Hr'h zXrrmkuYd* #%%gcKM0* 9R)r Lv"D|`BK3T*7cn1U@ТFm#|f m ̀ʸm<+~E4;\ I8E,A&!%`izOZs1g`e'H`RR>V7_cZ8.`Eȡl Aj@fJGk (N8q(O(E!%V,sSjp:*#WEdٱSS_ aVTo0\m"1H8\$0I2JK挐1qbלq!W㨮 v86H5Cxv4"_\gq8)exOoGWߟ~}V3SCj,0fOD}ӴѴ7qG^vBZw󇐕]Ay{- e܈,hҺXxï8eگ$6MAX"_UEʱ:۸VB`X$l`UbЉ,0 !SW 9 bM:X"'+# $(B.Cv~Mo+o2*bP,z' DQAgQ>Ulqe*`tNUe*hEIf.R2aCRg UŎP HX ac[Jzžƒ\E/pٴ*}[툞 53/郎*iT* :Hwz4_*ҳ޷jo(@",ÝFߎѾw>`tyճ襠{xգdr"NIR1:f auN5%&_eJS@ZYSa#bT)%PJ hA 3֖x@HcK(󹪈:=|?MĞg)֙ZFa[vq^vn"ET<2TۢZr['nAqōw/g }q~7dIjp*O6ѠwguWܟz\iF0o2C:([5"os2{فuXȳ}XW>؏ Z]!.<*o闾ѽ%v t{a jNŰa.t,5jy9'B&]T7|\+FF!a/yi "gWU6v4 2=@P䬒E^gŽ|~ vBz_IKTSAP=Q\_C' N3Ʈ e9ͨZԣQݴQJ_ aKmޙVlISp <)WT0^SqY}{7&"F쿿OD*\ՑEU쀿ڎɗGS@7*uaGe$~Kx-֐,hBCޑ+w*˃]u*@^yͻ(;+gZX9+uԎ|:?Dͧwm!#K RD샕k ث\1z/?9P$pvY@>..#E:‚}7nETHMrǶm.. }[4ƣhl3%h.aъSũ+^y꧲[E긜lheLNiQLʡ޳:tjVC9PeXJtKХ9V;SwxKktqJo#Qew~7Vgog?Yٯ[@tzZ^ȷqX$o>NGI/`E|XN͎Sf&]/7N{׿^3oV U\R|7E>ΓD'ST'uUX&c]D$_Ka7_=+-}aKφȣڣWc'G Ki&gim,l ܡWk [֤<׈ѱ Fv69P֝ ˘SAxths7z3$ !cs`CYZi~A[ǟWyVB:izegv73r2WW{ыwYTo0Oc㮳YMEVs.OO\o<=gs!nanW~%^ f汷Dvhn3̧>[:hF~f/N ˇlzkn}cK I=4bL(qHGTX%@VKbtpny*T;h019!/&Ժ1xpsznWʆP8{p(^/fܔYbDޫncqn֫VJN}.Eh `ֹ|ܶa$ק/uҒ|1F8TEԄ0jb![#rѹd@Z镯Fd/D'SךU*T|xFSB}~,?iKÛfW _aKGpTbvDd|>g중n0Ohca`tN8 c2+Pі4s‰VLj?w]cQJ o.c7'߽.\>Kc<_?œ ^}܂n\/Ǯ0y\_}h$IKRcΝLFսgT8X5DjH5d t@EQMxE멂KSF6{sH5BLI-۫944< ) yG {|I)ȕF9z\j*;hH @C46XmA:Pʋ/-$0˨H^騠ž/E1bݕT.\7:Iga6YX-n-E mNNd|ꕝTWv{ ctrxcg-|Tw{o|\)Vu[$W58 T5;ӲSíD-֠q 4u7AAY{ @Kz{/{VrG/OΨO-:6r#UC=VuK>FA :e-OԡMSגʸqv*矷tk)i[P@b(3{N!$n}8tⳓSx 4L~u U{D o&=ݺ ,&f"eVpő+6wVZ7Kn7M|N칗) gz8_!%əU߻Z I N Vc5.wMrA&2$΅BÞꯪnOIfw>+lPϼʯ qC4~DU"-Qʢ9װX'u!( UXF+Oj+ӃSdA2;4CR}R"dw J]:=UxP_!]}MpKZ5k­!%yCAxz!8#9$-Wd~8zwLp VrQ&/ZU& H&HK Ȅ vH|R =h+&{w6Co@d7@;P-4t<<6e5'JZMt:Q&)[g 2NR{:6zP]ؠKQVʑ.+TG~:@s:ţcO.򖌖/Mdj/D鯅ELǚEWfUYtk]k=jQ^gA }scr-q+@sԩy>`%y;Z45}e QNzƽ~k},nQwKG>˭RJPaNWnlK6 8Qryuj<5%\/R(9 A0R&VQp"Eu*0΅ezJH;5 MD8輕AQB0"Fd>@yN3`-g+v!j4HD;uv8""OZNHe IUĤ>Z^$A%v,PcދdV>iiihciwȫa%0PuSeTz<%gw:tKPI神{0-]F:%ܪp\1+ږ8b &sI 8jL@u$YۡdWUaقf4hw _^?Su2uΠ}tU0j>$ t ɸ19cJHV;QɄ众-V$xaK nq{!9h*5<Ƅ +gњhy-V$N TuNU -o\J$PCH6pFۍz2WY@5f ҶEC6~ī'[hgdZ?vt=>56JV,Yjz'4criT,nsOMp/Cvp͈5nmJP Hۋ>-'6XRsw5[h/J;zmu}! ;.m.[js0xvm$`WT@91HpS4*Lwr⎰ڮ݇mI,_wW8iǭaXķh'k2PbiT >LoZ Ke>W7,NsDt>_Oo W0C.Q/z2Vx+VƊᨯ`ˆVE)-pxVt*Ji GVQWEiK^Aޔv .E/YIL,#D8ZK1R~T,O%_Z&HZ~y^߻yPQ_/cK_;~ JR{G/ UKbG.6Wܽ`3o9Dя1J|>ː%ho/nr.K\e.F;_y<=ܲF>r@Kw)U5Qs' ep:V $Z@вTUNOH^+&?$p1GjYro2jf7ޜ[<{q-?^oALH#Y#t]Vsz$xKʷ('['*r-qEbs7bfZdI0z؏^>~ڕ,jR鉺z+M Kψ˟w*w=ߩs 4њl4^fhqnpؙSV.B$)k0%Ol!(4ZlJ2ơ֍SՉRS(deOq6N_2 ~*ݱҖ "H AN:YX=%ZƪҖ/jRM:W&A[=ΒpϞ pLؼ [4&VkxW*d8V\v7W>ch.j祶g![qS5V5ϙ'bղZ*!&Kkb20uS^2/:#R! 
*VS.z{ULSznIXT *-,ڝ80r߆IRI4Au+|E5FW*MoRBE5֊/_^Cm Yi_xA{ck@!!%WT̶谚ʲnUtwQVQL7ƀߤC-ˌ?bɫI1na׽h:eq7Li1muTn7MX0ߐ Ukt?%s5>FͰM,gyp,"Bd #9Q4}r^?e+ QQWCX-4ΊC N51[yg PQ-'Z񎯀UKG5DzFﵞO8=~=1qZjC Z<it%RJj, / @fB\pEJN 88C-CXlCu.yO'g雓C' uaN-)_3;D"rWϴ_:,o EhI%~9Wbޫ̷ٜP|W;#Ô:n;h&I:̙1k†$ SIW90I_x=}\`|3o}T>Jԧ]\XezeH'5S?^|ѫ/zE:.ùߨ*h$iOZq∋2D\[|R9ujN˱ԨjN͸a; 3YP$5$+3án5kWY%LjN^:gp1RF<8UHȀ>RKfE.& VvVp1H_;HI[nc܉xO}^>寴~5^ i) ` 'ݔt;nIAe@Z@XP 0=NMpϳt9 dIBaSAJmpAXNMP,0N$h1,wcVz K DԔ6\:5l 2TrP՝Jo 7f3r F2ÕDQcY±jP 8|VT1jRꪄF^R.rV]]t)eBIUf*8(kfʠBD+Lⲉjye֠c Rn~̗2j?6(Y_Q7nO]Lëd7M< Oq">NE>ǫyʑǫy*GAdHlEM $dMD?h4JW C fR>m_*jʱ9ѝUrrs(Dqi3)MR^ljဨp{B{>L)un5إ90E.fk}NFS0e ,6 -M,6S@eac{ ')r+J$PXd=.T\A\@c,;xzA@p9f@ض//InNrcŠL/;|BY/CP@M"Hqy15刺D;Ye'ts"NqȻ`<ǧ{T!X8DUxJh<*rJxd.:G2T+™bmX,eJMyN#Pzg>EH21=[;HLw[.סK (dRĆr܋RU-[T1ցC)(2*%-Ҥ YJY8hnL5 ZF6G5 2Yh_5`¨khk9Q44cŠB?\z0/=bLtM$GF蜺 7$B%=5d7656?ʆ}ifԧeUv3wq1R\Bp ;z49q&SrCeg<$VZIO0Npni&)YJ883#~'qԀ(t7X~<mJ̠ % Hm#~/$4@SqFAO+)E@['cR!pCL"haWJb9\TzI%Tl)>#ّH|ꝟ_4y@;µt/G&3jOvrOh 'ƭ'[O έW[\}s p'窟d'T@KLН |,{|KLZMS 5 ʡ̀n^m=9<׶H)oۜG` CI''=?s=|pn|a岕&6| רp 7bGe~|zbkd}C_4}ܤI&w_<}^Ici?Y@Y,(w[E l5+,xuoVٝY?dqqKc>_Oot18,%.i엮W FmùǍ$9_lyc=hg_,G#6Cu$jYE4Y̊"22"22kB[N.0h>YƧ+1S0.yO<:l 71pWfBgҜ4saѤLz2Ev"a"vF BiRZF:Y - 6Iy'aq`R$qUV }khDƪDZv^XDw:e gQw_vJURp-Q}Ӡ*Ҭ~cRܮp -OQ^3wd[Gt/ 2ZVzڤ\O,2/|O/zIYQK=rq4G+\{uV<嵎nܲn+y_ouwm ¢%#2n- @; _=+.ء1V 9r\so1a:p8(xs! kY{#qE E˵` TS$ƿVF{dQ9 "7\drE%-3QXTQA6'T"ϑ2A(%YEa0aUX 1yK jټmmDo@+Ru~|P |Z<~b\ZL1USNAs@Ov^(0L V> /$&`i |c4Xa &HaC@ vBu828J N"2$sr0L.hhWǓ(2Xt"{/GKNuԯA5/.<H-α\Yf|(.yc&W{cP\ Iy%t80#H\(A%Lv7C%a@b]D:탖Oq0On⻧7@ǁ[\EXo2n0t0a?u Ϧ/zj~ 2?r?l Fs9eo@HA rNPLcuF1A8r`\ȹDi& >3 AX,Q:gb*WohCwv*ƶR_ƈ5I12®#,q+HiGɢdQf(3YQf쳪RHU PcM,9 C!V/ H})Ō01ˤӁvYjB )-ؚ*%Q]t_S-Ei<' DQܗ_G[7)<=unֿ$f fNwWkR MW3}A !]9g6ȜmoYArmBX7)+_ޥ+=؎*4'9 &fY!2Cp !ދ5 (PVy ZЁK˽ʙA2epZ;IGўR#מ4[M*)NG>^)thMjKkc?OWS)OOmG}$ &LW~a& 3/KEF2fF#XÞ+.#6F9k;Dy4>+t8h\"Q|!)3zZCP5Zyˠ%fMc^~A4TkԮꢝ!*ϓvX1䡦(9RAs [DshE! 
#OG2HdL6[rk'+ωjKp/Ty#b٨8p|ޖ+x8$B+bʐ;%&iFǻ=[Vo*(Kk0F)n91B M9c c',# q6T*1uac euWj刖DDesYYRKK xP$ొ)m !GAM2WX$1عJ)nCBKr_4E<0obȑr3Ua0_S=H?cr΢/^J W߿&~jA&'A[_^ Xﯮza0Ζac>{?c)@R^f:,`xO0W8 D(c(0Xqb$GcJ7#}tsET$A%W"^Z_2(d t;`3H[n9-u)(TEZ{_(pF.tBe*g%΂/ g6럙JIp:R ෣a#[<#C(z3-%0 ֍+$_r~ZI*-J=x?4߁fƑc٧xǨQKP9U;32 gH_V> n᫞CfKfj6H@) e-`9S 0ϘCys.2:Xm5jl,cXp<8X[g,#qi^뜈\JSV вƊpkF˳OpfX%uȰ ߷Ac[,"77-q*vyNį~ g>LUo0`KϦ/1,z|l&<_]w񚂋W/f1|)9ԯ>8B||Q %bR2hCf dfyXF=qXD~!x":;,-ұj^\͐ΥgY({RHFd}pf2>)$)};34QS+PS RsǶFzbW|}K/g;X3?eB7j旃\{<] :|ֺ=G8Lz_!Mբʙ%uw -.9d"aI,}`.VhqOxufn'~e8ʴҬKrS)hBCۋRkth;EX zEXMI w۷o$޹#~HHaf0c Q"5V)-iEH*qsi'$Tڍ%A,][ uRBzpO%b%?MKӍ s$beF_f@o`NfZNZOaGu: )>$hu^vffU&gY59i.1oS~Qo^YNxx7YAY1 fx@)xP-Z/yPĺVO !WNN8—'gݩ*_T)5-jE f{c׮w<|b0jamPӫhkOĿE5q='ayq6~uZh-%:N3o~:QRj`}_3l8b ' Pg5wD| ͶN2|P[/a*D>&HU۩uk*Lb o=c —|MCOEb >wA Lig䰟[s Km?xK{RBmu\4Si!BF5x{JR^eM2 VY"Vw="vzAhn,7ey|R-.vz3~%sI.\ kb1)i`ҹ%IL,3 L1v'̈́$߹+VS+kX0@0]<>(eXm:z٩l%bŒ9=!!/c#6LΨNJt̹K2EƘr׺~][?VLvsc5#S脫s?rDļy~] 1rR/ҿ%9q|%' 3.X3zKƻ r ֤j B)>~?g '/Qm,R a%ňI"HI&FP]&U?nQBFD3W~<銳BnL'JE%b]+1/jhk@Ѽ\X(m2D^'pdMŵ5Y 0>ɧ v6T{mmf)hp1{>KQ0K{| e9D%img~dIx; 8t*x x_[S+̈%:c‹LcgHQGbC3 覌էkV`c-ENi^3K?{b=H'Qϟ̌O0;A&g)!#݊Jbӎw=(auFM:ux 5Zj(JB%h%X)^ݞ*IVy_QWXr ƪ :伥jG:GToOF_W:¨m X5onp]\k$4G8f-]81%ErXAfG\BqJZP2X,3 rRҧeom" Jx|ze#nAC]"Pz\VNJl)e;%:]S.QK>?(Zv1;0!#BkdwJqHS^CJ+7Y˷l55Qp<]# IL bBn%xq!@R, ](S'..a iZ_F^]w_1ewlq.Z̴/] (JuԖыvOʒl9VQt7L>lU@U\uiJLQnXg:,4\ZϸYAfUy9*9Vk mkw!/iJl+-86P!ikZ]fJem:g/B]͝)p0?$Hѳ쉥(cXgM()<܌FIC7M\?hL#5'!Di-d#4RE%PG ɭBW8c5z<ӗxš-s?hx=4| { z&m2q^L:fϟ)Qٖu^؛w^XQؕpNPk_1H!N)JhWW: ZT_L%ƪ%HJiYzt:KOt2 N]tYBO\VpQ+TPvZR4]y#b)uEq2ڴJJnX4g˭}0|U9QxˆU7Igu cR,fwlGZdJU!ݰrn[ZKM84a DaGm3'5S^j=mx9mĸ%DXȊG O:rgE/r_lO>7h1L@'`U`R'Sֆ>_HyO1Y"lLR]Lp+=\yPNԹM fN- <Ǻme$1;g?)ѫnqqqn셽XSf:ALIXp&vVsoTQ8*$qQnRpY1NNOTb%`!\,Lv͐Z#t &|6fog 2 .?ObgFe2TɄe\%iƈ(J@Jbbq"N)ƆZ1 Yo` JtqNMlJSv_ۑ ZHЁ @A2 }$+ "ݫ%v1M(6`an:p"#8#sqİ)3r>kI b8a74)捩R>h_"(0H22|TFL<~SɳEfb@p*e:4t{),"@1GUTH۾+/n zUW)6iz)Q'%3y)$4j`G^FWAO 8/XK0l q !,f Qq4L3yjq+2֞y۰CTbZ0 iXB0y-uw/3LT^} @_C~4t&)3-{9g&pȅm -2K:qƦ1I۫Ű t^¦cPFq |8yss5]c(3tdL?o z5W  }o}X?<{g>~~< sT@<9aX?7a7h 9P=<=jD>Q$SH~Qb%:$O՘XfqxGRi Ia)RvBƤk1KCk J GK5? RpP '\k 6F)4=+}奭Q nx ]L*|C`7pL. bC08XiSK*Jzg:,TnS͡AVHC`n!c& dcpz>/A&L /LRr>uo&tcp} NE'b;p[Z`2fE^X^L$Eg e=viF+χoПozsA>U'zӃO'=|:~69{/L/a-ez/ 1Hs ncC-TrW֓\JU(LTQV.Ok#Ȍ@n]\flW^zkPa|ٵy-4x=uU:qq0ـ}wQ"ji[yѹ4.>-ߵV>Djش\Z{ &IeUЗ184n{6 5\#ߨc0m *zhvw堓>ΐ?>%ug2Tn?L"QY?g1bXؤFurasp̱:GF`Q:VMy3p#Ǥ:ZHJ55YOY""b}\J.d|KasVLdUvX5 j"/&jMeryrgze.ĞdJb`uwR Bb"RÙ<lsg|oW,kV̈fX`ʁe1ragYqsU LHJ *sy$n"o%7ĎYp]fLh.h6n7j>Z'+`l[h3 M>GW's+itM,(3?< qYH*wݷO_W2-|J`ڇ+@F8y~[z?Ǔ|Q8 û+'{[L,-z?@&rGT=P@= lr;H*}5!z%ŔRL%zM*AAf _ۂ|+ -5@!}G>G I5@t&޵f/Z:;Ct!=:6qd.J>.ik oOkP6k(>^Rtod?_j"mYܑR#Ԋ"QX_̞TP BԐ}p@cbE )i}(91pbB]Udk/n }?r*2 MG06\vz2J>3fޥB D,U0uT3 `jk[xz989[䈖HS)s:ؔ B7Rh% 1MX-gJS@~lxzRtſgɦ)ל*m +!NF:=5e/G& 23PK X GiL= XI^Y>P"B[^LDZ$^+$XrUquPv Rg F.^}xlZc9hY1k`>y75mִ/sq \܎ۀ HnP$}Of&?{+-NnW;@y?Di)0~܀jJp/3eM \JKZŰ "t^ʦc8Fq |׫<(kt oF1Foܧ#3v7f Fw߇K~[׫7=7^n{k̮x^aXNYAe]0A§&XUIؙ0k̀_c64T-}ONN [P#KM L vc32ͽHTe M?."f_1hJΠ DY7}Bsvuee/si?sIq_,F0k[]Gu~#rI\x9o>(e pZlzKkC HoVNؖ޾O;? 
JY&R*2&g["A1cAYVsلI0)/\d%\s,d )e2Pj $ '.m)j կwfMaa/rtPs-c|<*!ytjV%cURg8,CRs2@fksy"dm4_OA5 CהWm3h%Y)!esgy]$jqxǐ/|zRtǢ vBxE_T/A]/y]'Җz58Nakta^xضE'V7ZTj?DU'2 sN~NF^k>$]A%(+dOOںe%1$xΙ%Vss:K)ο*Uv UyݗG Ѩg֡&HefIS*~ajD!MQƺ4cչ9P#` ,LH`ǚ)i4 $i* %tΈ3UhMz`6K5lFiF!% F/c6VK.wqaP fLcj3e1QqX}BU$LhR)UJV!UֲUk͔s^ͮ}4r!o *BpBlO$ L+*dI&]\ĎIQo):*t^*}'F6I jÊژ$D:Ban%u%F}u LJ4;X@{ ,dᵐFzP 0G8 bb$pIiXG":dmԣDت?a,z`{ 2j}of>®g؎f ]*7%Sqx2qXHcmga+՚7ᑘi 8gWOb|NWӘLiqVs;UƎ> ǥ5`  ֦ȧ_yӯv/!#>j!~}Cݑ9Zݥv_x-J-jwZqFU,C (\u%),,L`" y6G\N\4vy˱?~ 7nIqƇMܤ}X+7H_ZX}L^H H#ъWh'q<^P^,䕛hwuMjF -5: J7:Z %8("IpOx y&O 5V<-~%;[e+\yj]?t1{ ?gU&n|~{a'z~yw6W~pE]YT.՚$!bjhFR EԠ$JI}%55:J.:̪c$M%HǴ*9 WZ)|Aꋦ.UjUAsD5 Jj+UjU(¬ }[4 'aU(¬ Ẅ́iתY|U */V13bUٓUbKj]+8YV!̪x0*aVNG ޮpT'aUAEsT?$)( A!D7*-\BegDnQ+EN0Efu(7j)=&2p{pzTk|(VŲn}B¤@i)ӻr h 7Z֧[<wu56xc4~kGXåhJ5Sp7jp,̪%IX0OVVUdFtȬ 1+M#d*hA\^bR+ast.4TCZPR@]%fKSJͅ JR/Aiy)э*cia ΰ1i;F.gR|F\՗hq)nYJK_Q0,6 29h6G$y2 }Iu5CDxۢuuu]]Ee-xȥfִg:}t_}:;q1K.:|С'=i@h9~g81/>B bO_'6)߫5LUTv9$W@qA;!bOݛoB%;kNQ{;LrѢl:`Gm}fu\=&/&IU2~ _]sR֛5c(lwF}aAyw5; Rr]B^2  Ļa)qJK<;2AǨ" JI"[|9/ǧF9$|'Gخ;.@(j-8{:([k&q^ã,[{vT\skC%ҜqBkKN0apրcdcRea֨ JlPت__fKbW B *|J @yhlQb_Z"e{Zbk^~^Lzѕv+Z :iUe}O[*uT頸5(`NB*vjnS5Hݣ^Q1@v7%ͮ }ֵߥ0咝o%x9=/ndPsc/nޱC9Srp61ׯxonWP5cRL]9o#m?]V"?/ Eւ`p.6aʏbmBY`I5p&j8JNEA<YU$/~?=̜ќ7~ʺAÊr۟!*d+ۗ‘wۢ.FeOr ΙhU"¥sf(4aQ~Uu\w(/N*>]'a\@hflDˈ#e3 nNx@vBM2H>P%Z LiA ZJ}!np9i9~!.h~p{c{H2< (FI?681 NR ,AH8*^PFB ]8zNMj&9xm:Fn8oD#b@|E`p*Y1LKhjO”CT"'ml $7jTQ W[c> SIQ= GaRQʔ(% Р+UC)5Wr 1_A\=JRU qQ-: Q^ڼ,0ȈW\#2Ԝ.(UR[rF+Yhyɯ IyߏfP |J*Ĉdn;`ʔjcxa\ BhQZfiPRk?6*0;5^3fEK">-9o3ܯBἸYҴx_Ee+}T ==(,"%#+;!V{/mlb_> ^tdKW˽<J5Cxc^BC8u(* s/+g:oRcbD3f`GjЌ/N)R.Vqle9bGB]Pl@)9+VH".z `avKe^R)MJ{C}F^>@)[[Iݷ2Ԍ t}>¤L-UB&U {Ѻ@)TB(jpYuxT!gTH'a].k.T&/d=l em c*Л8|9I^%,$Hf%K)x}1k+lW*v秛zRX$RhZ<&~*%u0m=(P%Jp0(<ٙ?WS ›癛{lΟl[x. {?*_;pv|7zŋ/?J~5gx{V94O|.˯ZuJ|)y=#6BuPW躶$Mlew;yIFYK. -'U]Gf7zkO&Z8zS5^Qq qQwklk LՅU UACñPD1tV8L #+LI dHFwsc#p*m +\?FX'͚ *;aD1ïGtiɧO/!d'2&cLfRxf:]a YrȺ>@}McW%!qWf8 E :iV1<>k\b}A6o%…^X7˞]yyVO]A&#齳tJr]Bkח5Cӝv0 E uz.VdXHý+\j)P? 
p,t6͢h%d]E+}HoL!h7IBgP3܍xn7; R ƚ^!A}Sڏ,q&mēh">rKmA# \h "%$ru'HB׈>r8If7 C0X`l@iKW)e7Rٲv:SUYnZE&?M}aZma*1J1v;66STB"98M83}EE:g!k9X}D}/ZaÌTzkvXi7zn|㗰oP3@@QRfJf1x E܎ZGQkr=з¥:7VyX/>SI&)@DG)C""$[Ioz ^#BZTpzI $&`=>|ljۤĄvhR c,33SdAFFœ&;& ,hNHZ 7)=K$ZEȆ+Ĉ)h *ip<rZ=kUς9׋V= ^6%M4όȑ\v}B~&q2gDZ &P> 4JCDTE3 g)CFBN37DEVJ๑ f)q"`6PF`; k*iZq=@.]:Ŧb=SseX]lD&L:O=L)NPPF',7uӉҨy2@d/T10ԾΏhBZ5K߼zcl,H~AQ, @)eV$J)=HuD& #2nnPZ[->Aّzv&|%G-8 YI>E3*r{wSG=}/~3Q{ Wu a4O1u_`NJ6f弾.vuˌIz!<P^'\'Ō4v8=gHw!=: u,ߨ+g>H&I]B%Ӥh쒼Bsz;7T=pLq!$)'17WVgH=_fpґz 59;L %| 839\ N:;@5N+{r xV#N0r̴Hy28p*S|]0% R `=VOem籮D-t}T;P)o$Boa:3nbn]'ջu5 nkI()/(GnΊ ׻*_udh?fܩTIXC3V|4dw>~:#gKޓ;8Cm_OpuV@N %4Lړfh V4ETtqIwtԝ ѥVI1ҟn7)@ud#:0]~h.6q#@723@5cu_\J01|y(S @)'A$̻TV M2%|І!1] ɶ moErWY6[+ \@y@[Ba9%:l8 "hp7v47ڷ ,9%ȬAY)[&) AqVlևoU3$([R8ʔo]93t%YF'L1Dq'γcK&h>7szQp{  `F;<.QM~3+\ǼpH)٠0i.gn0nw]J} N{t+ҖWŠ."ADPɑD mH?Yv'U:}͝:H ?VԂ22v:N2H4BPVq{wp5J(n2HufIhy!hvY(0k8熄H& k1fˑ \J w3YL2u#|VQJjow͟Z?;/l(Un>84_z@JH%-x'Kj?vYDf|ja|Q:%Oo³7̢VZ,z#؀u2eEQ!mG )JM%9K}#ݕ_h)GxN zldþ$4œP@HN3V;g2F>mzR1Ը70ť/d-[u.|o;ˠOERk`Q@y(ahm>8[d"ڔX3HC6UC##>d#1ʲ1J8{dQ6]ۮǧMc3ɀ|;,ċ5@TʙNu8pAx_ Q u˴?t| wZeO9B7>nK(7,]^亵۲ܛ򶙹e&Cb +\BApP[AuzR}gF;.vfT5@]^n_^ Nx'rJ'K`LX#i[7~1 |Reb fKP%;(1ia2oh<3BѦOՓ<v }HhiogVOÜEmz@Ȋ{!:npOCL9ΛzQO^]֌("\H0Qkb4rlMh(Rd}bҌ>s~٨Q*K}/QGvZjˆVx\N:9Y+2#-_3Uv#%u~IqS\UE 'ȹC5 mܨ_?/UǐMzW-󲽻,M^\0:Sn,^+\u픫Q~8j^EӚ\w9m*1ůɟ뇣M5uM??Myjs%*mITw?hۘvHz%qIV2o{mL5͡4pcWzvxhE8bE.@Do*|q,X&qC c,d|G_Ҍm2փȣZZ2ª\C<ӯ3.|/Wuq,GN#_o?Cj=" .wE /ƯRXo"cB׫4%UY.*Xgv>m.9=y U54W^Oxlˋ>Q',>C֑@,cQp#:֘oA 1R:~C"4-d]_/aE.`v5-9+z ` 9*ji22 ^zCHG(lP޵#e1K,pN<-Ւ-˶$_-J#;܊٫ėvb+^>~)PQq|PBŃvi7?)|mOc>~ O?[b5zNJӭ$-IK!ABр1"rGO%Xӭl[IAuGC%eQ^,9HW 9q2#B "3MHg\-E`t"s5l8ϰb߫) EMfL=NՠZA $'ߴbbMXphZڵ #)K^BK^58Ev >*VZȾQZTڋQ_'e$䔨]M7/%uHYsqjtzn~? >|?~tۇ[2Nzr/ ju~raz;a8Ӡ>mSnO?~ ր=f]_}7m8bpN!ګ̯MzsU_J{SdoU˜ozU2lAS:SԝdUJQk 9h>h\aJdx- vtԼ%bIƃL|&YtvL zq=5[kK:swGM B%SQ0t!'y #;؎ZbRg \&uP)̦eQ kpɏ1QP (|EA{3r:/ >Q"ѱؠ _|[k-*z>'}L6;#t'}H/&ߝiA?bu (-eifhnղƂMւs(4D6Ec]&Fn{|ADc{`M6ǬSErXA'*|dqr[7U:tH47LB'sP6v]hR <޳6L-s::\ۮ[$BC?~p%b`(K-zY1)AD,D@)cyg.?[lӒ ۥ>Nj\+~~, JdOi9KĤ'ivQ%MR)m%0n))L!\DF'ORSa{+H,NKELm 4OR2I E#zNqo1!Rxf =Ҕ"S@YEqDSAF,@+J\YZO^UgA=)elB 皬Z:ӄR&6_6FC5;/LMv8($g ntOViuDc39X%8LI'уJpOZTwL6e%f91~Jo>\ήF˯?ZśT=+Q ձK":+yq~ڼa7-A5!՟׻Lt\7"kGMyLaΩH4dD/x`Tנ_M|\1Fc8k/"kik5yxJ!boڿƞ;Eo0:Rv>n(@嚰c S?1doÿ5ǣE([\_ۦp&kC?І^e%KN!$SՔd,RqQ1Ǔ|e2d+Ŗ6.CYg)dh'm 9>HKTOǢ\ĒwIc&]Zn'bG +b4`!m@]S_{#PϦy(Hs/U(sl|@** jiU+ѹ#x|Z>N۵r;Yig_fc =YGA(l>Ȩ\"b&#ϛ@3̠02mR|'5c{MyxXmֿah(.m#K Rh¥t œ$o$>M~%xπ#ٲs}ص@Jn1 L%)'a Ɗ<.$q>/F̠- yc, @ >N% D@ 7j#jݲh5d'ϥJ9$^tjwn>`{ݴZ#Vn7Vb_g#?MubVT)˜z3HYQʠ) >|kl((WEtק0mH‹ w1.99s'\I[6p4մ& 5uo? J2rhvOKM3ivSsm욦 5 SDZQmE 0uB#OL$[Y޴j6|.*ɄsR7l}5Twh N/}S;ǷwR4b51ك*k^KYB`V5!Riދa</kR]LP&PvSM #"NLݵD}!/ymt(tMЧ.sC-iPYBkd e#dAu˯^:(U;g»:rW刪UFATKZJץ>RHniz ݓ{eI`Eh֮Rg0aI̍mC3ZϜJ QP,) -Ϧ@%@+n=*F vsQ2s3w7Ĕ( Mۖ+0'J8< Y ,J/.SɥOtpic8\6qv@e+; ȯ2-6<. \O¾*7T(oj)Aovnj):8ަV2Vҫ {Ъ ? IGiO¿l*h"ٙgw7|P9:!:}`pŎRD8! 
lKLvPnj%= UixC(!⁙}=y7ɺQy[ݐ5ݴѲ0֎[twB^`P넔ڈ,AzL#" EB*avRȵ} cy-ɝm0Aq2zfOG)Cv6=v>*s8r{jdz Did`Bi.ܲTHн,/$ ɭA>ܤDx\\}-~7?LGO-f?#oZD~b>&qTk4{~)ܧ"Hu0$lUiWn ȵYɲow>7Zecq*$_V?x 4 YJzنaC/Lw ƃOuM0vҗ0wڭn*'ϥZiW= -!aXhiUUjEhUkU-" Ʃ_E4wTpa-C-6o ˞O@A+hKAގA˴8ɘҁ'>/vX 5-; gKv\Z^YI x .\t!mR4eÙFZN,Mm^Aå@$M%##KƠRS1e\QbAޔ0d|:To\A kWdEBޭE=vт[hE E (C1+47|WFX=Krɵh壔؀HNC:Nr8I'sBp{}` S`PT=K= ޥ=qJ[fGKqTD[*5b|1O}yV*Ju %wo\?f(BHF- >bH+b+k #M84H/vRwhb)RTNh?#I;`݇bKa[~9uJHaK߬&@F)2 6̪("W?]XAtRT0 SO*|5`|+Ff?qA&`GA TB1Ԛ}nxjd?r=D ڧ,A?Y$Qߊ  bB)p,) E1Zu'Gц5TTɻW~6B7.iWJs.ggc)ǖ"̭ pv-趶UQ,\]-զBI~-EN,2e`]8)ɚEʪO bF-G6-,$eAgJ/ X9{>څ fޏfנkq!rݧ_\Vk5/Ǐ]hK$hjӢKH@$N׍{=bLkh&t1$TfdU TA7=;;& /O ZO׸??%Z[MoD/ǹLË4}4!nv!A tBFDjs&V,7WKMخnCk%U* P:>9Kk{a>0 LO7ϓUt\+w'΀N߮Nvc98J) 0aF#"D)y0LhG ƌњ#PQ"Z3H'!%TT&aHJmGK$, +,t*Kf1Q!+QJ'dH7f+R#Wň2!i/ewԋ:cd.C4} We377[!3ltU>oN*Պ#SnRJaƣTkMTLC8!P =5F8c$Jk$<UP(DʭL[DiH+3,D 0WC;<(O}AwJ!Fֈ!K9K dB(oUbrc#M.Yť| E>FPi~e%/Ǜ~w+|ˏ|};K6D# zw?'g?+$rWPUW/O@ ?G>7y;cߴJ}}!ku24m˖?xu OfT#p*qgYbG^AID :AI&A D ъH`D" nS@Г%oJ6_εDqA S˂ruSVΌH-۫(1k(ߎVWcCr]Z{A#1+&,hZwy9,1oxGE R#$ HxS0ӳE-*HbwxRIF Ry3|7h|'5X#\kK!*\ u:>,@jwgXRz0+?|k6?N{Sh⎲ r*_ vOwW4_KE>nr2hai*Q-Y37m*eb=;jx7E}nMec:MQǻuGHyƌz6,䙛h#"g5ET9u[:N̻5f4׻a!Dw) ^( WHr_Ҕ ?rfY)d}uLdz9m_<8H;5E#{zeyV+wd%$FD/JŷQ}J5\Kşt8ͼKUҷ(K,RNKYf &Uʡ!}6Wf{ҾgKtO}ۨ>_AF~}>yde+@T&Yznvkvr~FD>(Dy,huƔTDsD KRQ =鹡&L`U9`:t0 <¢X/ <*fukUyw Uy1&i/jTsѪ:*iU b<=jexLXsy(YؙTpLvq/pBG IeB$*G%W/bGE^CAg)gMs}=f2k6kPՒS8ehB AG]H|W}/Zrv0X^0Jq?t"iGIA Co4ӫDf9fzcgX@:RC̜xd+" ]0HO- h3amhNUT5Kv_j%X5!|J2|T H`rDFԂOb XMbs*ƼnQz noN1Z-jtZfD2eH 1^z'q:a1cR"'("(qc:1:q"u(0̵HkQ\yI\\2 \E)&45[3՚ :;怸T2)h<9qV@\ȋ<O )W"jޙG)uR1+גt% e6 I ZWPŽ PZg\; g$j7l(-"5a8"2x*!n\ j6ׂl^7T4i;Gz+Gr_Z PZV֘y&<[Bi\/"GۧoASE&K:zQeHE^KI5Sm!܈i# $wT=⫳~{3 (0 HdgH{*X56dBhI|o{v̓:{{ M-u*&xX#.(\ha %9>EA(hYt3\\9 C$wDBº\;ĉViUí&"Azhpw>'h)܂;NM=歯74YwXO[ #̺ÉigD-\h'FHl5ĉN{?E!?m`P]`GkWp~˞qg˛aj@ʵ't.9a+(.hT 9Bk(h\ w=iݩViEˮi5Tgv|z1ƺUK$iTa3J>`4ApJw9Wui9dba!Dؔ@+#& lCdAz6|G-كE_I;E׹-X37F6"M^6A]we<`!Dn︌2`!Dw)Stȁy[ېsFXLeE31d"dfMF/;SvVBۯ˹p׫Ee,7ʍhP!Ē G"XuwNv$l/֫#hw-MH`f2KwT65fcm8'j)`l0}] D@ EF .On3  \'ϼ { IDj KM2bks.!+\I*NU(}{m`Glt֪:t'n[D5 Z1TVEۛZЉ0y @=o%ug3zW&HP}Yc :3u-u8\#p:'1ډavGcO @;h.en:N;Rۄ)wX%һ wޘ`qz7$ޡLtw 6Q}_ԉ>}bB" FlF i4H(>čmPV$zAmAZqBը*٦l ˔Vj֗9j T)*%dYs°U\+v#a/e`В}"VV+zHi^-יl [g;ktbqFrX$"Q]lĞRv rDwa$FMLdTSd.h +/<ĭ;eu݅Qed5B嫃}0_G|5"|K^K+[o s PWbQ,jE ^xc"CjSƼpËeݼKUGh_~Wm^A(ΎMJkV~]ۃTiIz>u/&.O3/g>~y3dRH!S4hIZaLI APZ 9<Xm_7y!e*JYSQnEQ:'%:B׾)&IJzdBtby\m'G UpJ6r B[[(HCeq!]Y#UFN%pK[PDX@|;zFX5uy|4: MqW-T쟗lqVeJg@[ @g_n>~}[ '?ZU[_Wgwfֽ\dȼGh?ӺfQ~nE'nHo5@uʞ3MhTHj` (p msEn\|_ݭ"a2cf:zIB6ooԾ1/M4f QVXRj鵨K^)}D#F:;sn}kqQ |;IIS#x^!RXX" o  [Q ."@yQW-~h;CUPGiͫ5f,,{p^AxՠsOڎtrAbanq \Wީ+l^:{\HkkD76FvZXrҍv Lu}t%IF/EJ=b.k{UrӀmk\| ,1M<@0NP^P*s00O;Z+] kդ`*D-R,,yjn*@,XkM!wtIVdɍq-!c bxZ7*SSTATPHZ]CB9j4fkKF/[pqߕ-c3d_-%*?~OzX*w Pxy+uVZ\ַK`tnן+~[[6τsW/$ޗD)q~vU|{Hi) wr l%ޣ*y~uak±5iqjoZ9?k?sOFφLd,2!<'ŶL#gBБ!^:gsٝ^b>يrB8$>-IxI O_NtW9}Xv@jVӃ K^dIT861M^U]y8ы{l3b^Ѹ4B8I =AB:ɷ硝y HwD:4M$Cz 5}N W躙^ѼOse&TUQ5eA Vƕ~Q%BT<\t? 
gOUF(D8:T:{*Cԭ7GdP!h& PVk//zVCj΂l_gAj|( ӫH~f-bfrmntm 0HO ib덍2i&*!"NעoJ\ViԼw,h7,~iCMQJ: %gšȗ5R18IZKmyQΡXbj;#ؔD%qcݑdž@;hD1ɻsn:N;Rۈ6Ebxl݆@;hpFs3wA trߑݦ;6[~NH6B9D0e"1e5N+}5C"]Z^vxc#scGV%@v.R.+3I}-5 Ч@qP}NTRʓ*kږU $WTeyk4,(cQi+p%P'iI 0\OvJ-0V<āN< lٕ!XX$pߦ4/ (`k|T,zk Cd?i.Qi:`c$yx Kϳ {p+ͧx<|=ӔN?g_MЗg[k#sb:9N')S%NEK Yb[j;֠035jN߅8-5NRH2cTRKy`a#,.OX M:*{@G1X_ 8,Glb!(X0=znNAKM%+[CCW)bx: &4;F|n SPbt2""I(ЍkQRwYݹ~,(5aiQhx$4S⦩uwT(CI֢ka%hUR ֣Cyq,j1 *:u VK`f:&8ďDLSMGNtdXRhBIܜo:E$ 3V^~;Jq/Ed?9 RrLi}gJH)rjҥuQqKaLڞvϑZkIMB7B4ڰnkV$ºSysB`mʢ)kQZҩb'7Ejc&s̡\†}Q!=8੅݊^ɺuU "a *0܉TIx?$X4{r%5 gAALv;RM 4u& {"C C4S ]f:&n:N;Rt.0whw!Y\$uŹXz-W1}Gwp6yz!!H'0 L;k~$OQD7vK[!Ay/o;ێn+yġ*гD$UPJDUJšJ\z'.L*&Q@}a'.L&ԫգ5bHFa7`Kq]|~jsW|ZmVqV6zU)+e-Va>,_BU= Th%izM火 gPEzȃ!&Kdc{@BlE-!3Aaz7h(CF4`CqiTh潣PЄD7Zdck,ռ7K=rJu=~:^."m4#ip|gBTWKQ:s疯+څ(5sR_lKOqwnq2BnQ)Qgcmt9L!o8(D8= 5ʒnVKBBUihrR)JVҢj(*lm~xY]1C^M %[ЋMAQHںY# map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-27 20:02:55.705265854 +0000 UTC m=+1.095519030,LastTimestamp:2026-01-27 20:02:55.705265854 +0000 UTC m=+1.095519030,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.744865 4793 manager.go:319] Starting recovery of all containers Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750384 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750437 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750451 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750461 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750473 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750484 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750496 4793 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750506 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750581 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750598 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750611 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750623 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750634 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750647 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750666 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750687 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750730 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750752 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750767 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750777 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750800 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750815 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750826 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750837 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750848 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750859 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750872 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750884 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750894 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750905 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750916 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750929 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750943 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750959 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750970 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.750999 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751011 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751022 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751033 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751044 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" 
volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751055 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751066 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751087 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751102 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751136 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751148 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751166 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751178 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751191 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751208 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751219 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" 
volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751230 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751251 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751263 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751281 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751294 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751306 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751318 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751352 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751363 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751375 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751386 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" 
volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751396 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751406 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751420 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751430 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751440 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751457 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751473 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751484 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751495 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751511 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751521 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" 
volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751532 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751582 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751594 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751605 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751616 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751628 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751638 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751651 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751661 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751671 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751689 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" 
volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751705 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751717 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751737 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751748 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751768 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751779 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751789 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751799 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751816 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751835 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751846 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751858 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751873 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751885 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751899 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751910 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751922 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751935 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751945 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.751955 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752004 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752020 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" 
volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752041 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752058 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752074 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752087 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752099 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752110 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752128 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752139 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752149 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752166 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752178 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752188 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752199 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752210 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752220 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752231 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752240 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752253 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752270 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752281 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752297 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752308 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" 
volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752322 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752334 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752345 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752362 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752373 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752384 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752406 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752419 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752431 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752442 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752454 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" 
volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752466 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752478 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752491 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752503 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752515 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752528 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752539 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752570 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752588 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752598 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.752612 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754256 4793 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754301 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754317 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754325 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754373 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754383 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754393 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754403 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754412 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754421 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754432 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754441 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754450 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754460 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754473 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754484 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754493 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754502 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754512 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754521 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754531 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754556 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754571 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754593 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754604 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754619 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754633 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754642 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754650 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754660 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754669 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754678 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754687 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754697 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754705 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754714 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754723 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754736 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754744 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754761 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754773 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754784 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754795 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754806 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" 
volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754818 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754836 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754848 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754864 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754898 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754910 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754926 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754944 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754955 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.754966 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.758738 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" 
volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.758757 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.758771 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.758782 4793 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.764004 4793 reconstruct.go:97] "Volume reconstruction finished" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.764024 4793 reconciler.go:26] "Reconciler: start to sync state" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.770186 4793 manager.go:324] Recovery completed Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.779212 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.781198 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.781236 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.781246 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.781827 4793 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.781896 4793 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.781954 4793 state_mem.go:36] "Initialized new in-memory state store" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.800276 4793 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.801921 4793 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.801960 4793 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.801981 4793 kubelet.go:2335] "Starting kubelet main sync loop" Jan 27 20:02:55 crc kubenswrapper[4793]: E0127 20:02:55.802022 4793 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 27 20:02:55 crc kubenswrapper[4793]: W0127 20:02:55.802607 4793 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.238:6443: connect: connection refused Jan 27 20:02:55 crc kubenswrapper[4793]: E0127 20:02:55.802681 4793 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.238:6443: connect: connection refused" logger="UnhandledError" Jan 27 20:02:55 crc kubenswrapper[4793]: E0127 20:02:55.843146 4793 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 27 20:02:55 crc kubenswrapper[4793]: E0127 20:02:55.902991 4793 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.932329 4793 policy_none.go:49] "None policy: Start" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.933232 4793 memory_manager.go:170] "Starting memorymanager" policy="None" Jan 27 20:02:55 crc kubenswrapper[4793]: I0127 20:02:55.933255 4793 state_mem.go:35] "Initializing new in-memory state store" Jan 27 20:02:55 crc kubenswrapper[4793]: E0127 20:02:55.943238 4793 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 27 20:02:55 crc kubenswrapper[4793]: E0127 20:02:55.943922 4793 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.238:6443: connect: connection refused" interval="400ms" Jan 27 20:02:56 crc kubenswrapper[4793]: E0127 20:02:56.043833 4793 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.046773 4793 manager.go:334] "Starting Device Plugin manager" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.046902 4793 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.046922 4793 server.go:79] "Starting device plugin registration server" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.047292 4793 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.047310 4793 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.047403 4793 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 27 20:02:56 crc 
kubenswrapper[4793]: I0127 20:02:56.047527 4793 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.047538 4793 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 27 20:02:56 crc kubenswrapper[4793]: E0127 20:02:56.055612 4793 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.103838 4793 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc"] Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.103958 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.105031 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.105071 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.105084 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.105193 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.105370 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.105440 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.105760 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.105784 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.105793 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.105855 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.106019 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.106068 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.106425 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.106444 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.106452 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.106680 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.106726 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.106736 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.106804 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.106819 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.106826 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.106952 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.107026 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.107053 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.107750 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.107776 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.107788 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.107826 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.107841 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.107848 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.107977 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.108135 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.108169 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.108470 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.108504 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.108516 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.108669 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.108698 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.108869 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.108895 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.108907 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.109309 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.109338 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.109349 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.147625 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.148599 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.148627 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.148636 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.148658 4793 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 27 20:02:56 crc kubenswrapper[4793]: E0127 20:02:56.149127 4793 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.238:6443: connect: connection refused" node="crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.169039 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.169080 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.169105 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:02:56 crc 
kubenswrapper[4793]: I0127 20:02:56.169126 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.169147 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.169166 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.169187 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.169215 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.169250 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.169278 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.169305 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.169329 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.169351 4793 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.169374 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.169397 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271058 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271143 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271177 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271204 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271232 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271287 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271317 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" 
(UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271345 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271380 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271407 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271439 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271468 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271494 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271521 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271588 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271942 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271974 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" 
(UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271988 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271940 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.272032 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.272062 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.272073 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271942 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.272006 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.272061 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.271974 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.272064 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.272033 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.272062 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.272150 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: E0127 20:02:56.345406 4793 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.238:6443: connect: connection refused" interval="800ms" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.349494 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.350889 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.350948 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.350965 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.350998 4793 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 27 20:02:56 crc kubenswrapper[4793]: E0127 20:02:56.351607 4793 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.238:6443: connect: connection refused" node="crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.428729 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.434269 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.456913 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.463131 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.470970 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:02:56 crc kubenswrapper[4793]: W0127 20:02:56.477838 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-805bdc38b6e73ef9e58e3f1fcd181dc67034895446ab50d5c74f6d95478e82fe WatchSource:0}: Error finding container 805bdc38b6e73ef9e58e3f1fcd181dc67034895446ab50d5c74f6d95478e82fe: Status 404 returned error can't find the container with id 805bdc38b6e73ef9e58e3f1fcd181dc67034895446ab50d5c74f6d95478e82fe Jan 27 20:02:56 crc kubenswrapper[4793]: W0127 20:02:56.478813 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-98d7c6af208038b8934b89704c210e17eb50c2b579c6306ee6f17b22101ed3e4 WatchSource:0}: Error finding container 98d7c6af208038b8934b89704c210e17eb50c2b579c6306ee6f17b22101ed3e4: Status 404 returned error can't find the container with id 98d7c6af208038b8934b89704c210e17eb50c2b579c6306ee6f17b22101ed3e4 Jan 27 20:02:56 crc kubenswrapper[4793]: W0127 20:02:56.489589 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-2c09763d1d3fdfa1dca5d1ed2cb7d577a2fa4934d2bbaf1e36d3aaee449eede3 WatchSource:0}: Error finding container 2c09763d1d3fdfa1dca5d1ed2cb7d577a2fa4934d2bbaf1e36d3aaee449eede3: Status 404 returned error can't find the container with id 2c09763d1d3fdfa1dca5d1ed2cb7d577a2fa4934d2bbaf1e36d3aaee449eede3 Jan 27 20:02:56 crc kubenswrapper[4793]: W0127 20:02:56.630637 4793 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.238:6443: connect: connection refused Jan 27 20:02:56 crc kubenswrapper[4793]: E0127 20:02:56.630990 4793 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.238:6443: connect: connection refused" logger="UnhandledError" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.733124 4793 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.238:6443: connect: connection refused Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.743206 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 06:14:19.655205785 +0000 UTC Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.752526 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.754238 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.754273 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.754283 
4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.754303 4793 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 27 20:02:56 crc kubenswrapper[4793]: E0127 20:02:56.754821 4793 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.238:6443: connect: connection refused" node="crc" Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.806074 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"98d7c6af208038b8934b89704c210e17eb50c2b579c6306ee6f17b22101ed3e4"} Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.806845 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"805bdc38b6e73ef9e58e3f1fcd181dc67034895446ab50d5c74f6d95478e82fe"} Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.807682 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2c09763d1d3fdfa1dca5d1ed2cb7d577a2fa4934d2bbaf1e36d3aaee449eede3"} Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.808371 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"23fb4b4c1eff1346d49b2aaf3de6b8be363496fea9c56d04302b73776b52aeb6"} Jan 27 20:02:56 crc kubenswrapper[4793]: I0127 20:02:56.809301 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"b9053491a16752093933d8dd674d808678fb166842dddaf7fbdbd4921a8d2dec"} Jan 27 20:02:57 crc kubenswrapper[4793]: W0127 20:02:57.130254 4793 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.238:6443: connect: connection refused Jan 27 20:02:57 crc kubenswrapper[4793]: E0127 20:02:57.130345 4793 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.238:6443: connect: connection refused" logger="UnhandledError" Jan 27 20:02:57 crc kubenswrapper[4793]: E0127 20:02:57.147655 4793 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.238:6443: connect: connection refused" interval="1.6s" Jan 27 20:02:57 crc kubenswrapper[4793]: W0127 20:02:57.205821 4793 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.238:6443: connect: connection refused Jan 27 20:02:57 crc kubenswrapper[4793]: E0127 20:02:57.205890 4793 
reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.238:6443: connect: connection refused" logger="UnhandledError" Jan 27 20:02:57 crc kubenswrapper[4793]: W0127 20:02:57.278476 4793 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.238:6443: connect: connection refused Jan 27 20:02:57 crc kubenswrapper[4793]: E0127 20:02:57.278558 4793 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.238:6443: connect: connection refused" logger="UnhandledError" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.555962 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.557474 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.557503 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.557514 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.557542 4793 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 27 20:02:57 crc kubenswrapper[4793]: E0127 20:02:57.558100 4793 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.238:6443: connect: connection refused" node="crc" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.636115 4793 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 27 20:02:57 crc kubenswrapper[4793]: E0127 20:02:57.637043 4793 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.238:6443: connect: connection refused" logger="UnhandledError" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.733948 4793 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.238:6443: connect: connection refused Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.744230 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 17:22:58.897504128 +0000 UTC Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.813619 4793 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="0753d10384c65b09f375673073051d1cf8309dd69ac51676617071f33a313a59" 
exitCode=0 Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.813734 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"0753d10384c65b09f375673073051d1cf8309dd69ac51676617071f33a313a59"} Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.813794 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.815007 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.815054 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.815077 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.815401 4793 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a" exitCode=0 Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.815470 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.815483 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a"} Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.816309 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.816345 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.816357 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.821240 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538"} Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.821307 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45"} Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.821344 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434"} Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.821370 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd"} Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.821255 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.822631 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.822664 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.822675 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.823659 4793 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1" exitCode=0 Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.823722 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1"} Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.823889 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.824992 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.825046 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.825071 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.825482 4793 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4" exitCode=0 Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.825534 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4"} Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.825749 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.827264 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.827323 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.827342 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.827468 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.828321 4793 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.828367 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:57 crc kubenswrapper[4793]: I0127 20:02:57.828384 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.733165 4793 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.238:6443: connect: connection refused Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.744621 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 06:13:15.816204019 +0000 UTC Jan 27 20:02:58 crc kubenswrapper[4793]: E0127 20:02:58.751828 4793 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.238:6443: connect: connection refused" interval="3.2s" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.832772 4793 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a" exitCode=0 Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.832855 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a"} Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.832983 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.833926 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.833960 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.833969 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.836367 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"08528cf770f8410a20d0d811501da40838dece3ce776acdb5d73b371e795bbb7"} Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.836489 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.838456 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.838476 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.838483 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.839365 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"9d37df3fad0cbffb0d04fc9ad6dd2f37186870c68993262c43a150bd38f31242"} Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.839400 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"49f17253bed9de06fefacec37381e23f24486f77508298e3db45218a50c2a407"} Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.839410 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"ba4d1738f6b64ad1e3c6338180b578ab559e038763880acb005b80ec2b0bde38"} Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.839475 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.840427 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.840476 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.840494 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.844753 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.844923 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522"} Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.844963 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c"} Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.844976 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23"} Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.845747 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.845776 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:58 crc kubenswrapper[4793]: I0127 20:02:58.845788 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:58 crc kubenswrapper[4793]: W0127 20:02:58.926080 4793 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get 
"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.238:6443: connect: connection refused Jan 27 20:02:58 crc kubenswrapper[4793]: E0127 20:02:58.926154 4793 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.238:6443: connect: connection refused" logger="UnhandledError" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.158853 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.160314 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.160338 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.160347 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.160367 4793 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 27 20:02:59 crc kubenswrapper[4793]: W0127 20:02:59.160467 4793 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.238:6443: connect: connection refused Jan 27 20:02:59 crc kubenswrapper[4793]: E0127 20:02:59.160629 4793 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.238:6443: connect: connection refused" logger="UnhandledError" Jan 27 20:02:59 crc kubenswrapper[4793]: E0127 20:02:59.160930 4793 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.238:6443: connect: connection refused" node="crc" Jan 27 20:02:59 crc kubenswrapper[4793]: W0127 20:02:59.174630 4793 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.238:6443: connect: connection refused Jan 27 20:02:59 crc kubenswrapper[4793]: E0127 20:02:59.174723 4793 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.238:6443: connect: connection refused" logger="UnhandledError" Jan 27 20:02:59 crc kubenswrapper[4793]: E0127 20:02:59.206005 4793 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.238:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188eaf0d674616be default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-27 20:02:55.705265854 +0000 UTC m=+1.095519030,LastTimestamp:2026-01-27 20:02:55.705265854 +0000 UTC m=+1.095519030,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 27 20:02:59 crc kubenswrapper[4793]: W0127 20:02:59.510856 4793 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.238:6443: connect: connection refused Jan 27 20:02:59 crc kubenswrapper[4793]: E0127 20:02:59.510964 4793 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.238:6443: connect: connection refused" logger="UnhandledError" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.733996 4793 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.238:6443: connect: connection refused Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.745387 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 13:47:42.452826514 +0000 UTC Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.850155 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"dc4399f4d91a9ce597c713fd26c9d1205279ecf5650f9955f9986fb6131faae4"} Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.850208 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b"} Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.850266 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.851101 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.851160 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.851181 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.852633 4793 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2" exitCode=0 Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.852709 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.852736 4793 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.852769 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2"} Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.852900 4793 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.852987 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.853403 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.853431 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.853448 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.853506 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.853524 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.853533 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.854283 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.854307 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:02:59 crc kubenswrapper[4793]: I0127 20:02:59.854317 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:00 crc kubenswrapper[4793]: I0127 20:03:00.745587 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 01:45:05.424015775 +0000 UTC Jan 27 20:03:00 crc kubenswrapper[4793]: I0127 20:03:00.860363 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc"} Jan 27 20:03:00 crc kubenswrapper[4793]: I0127 20:03:00.860485 4793 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 27 20:03:00 crc kubenswrapper[4793]: I0127 20:03:00.860601 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:00 crc kubenswrapper[4793]: I0127 20:03:00.860485 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589"} Jan 27 20:03:00 crc kubenswrapper[4793]: I0127 20:03:00.860397 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" 
Jan 27 20:03:00 crc kubenswrapper[4793]: I0127 20:03:00.860771 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140"} Jan 27 20:03:00 crc kubenswrapper[4793]: I0127 20:03:00.860814 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07"} Jan 27 20:03:00 crc kubenswrapper[4793]: I0127 20:03:00.860837 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a"} Jan 27 20:03:00 crc kubenswrapper[4793]: I0127 20:03:00.862170 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:00 crc kubenswrapper[4793]: I0127 20:03:00.862201 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:00 crc kubenswrapper[4793]: I0127 20:03:00.862213 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:00 crc kubenswrapper[4793]: I0127 20:03:00.862745 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:00 crc kubenswrapper[4793]: I0127 20:03:00.862799 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:00 crc kubenswrapper[4793]: I0127 20:03:00.862835 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:01 crc kubenswrapper[4793]: I0127 20:03:01.746390 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 13:41:16.242842728 +0000 UTC Jan 27 20:03:01 crc kubenswrapper[4793]: I0127 20:03:01.858954 4793 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 27 20:03:01 crc kubenswrapper[4793]: I0127 20:03:01.862951 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:01 crc kubenswrapper[4793]: I0127 20:03:01.864042 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:01 crc kubenswrapper[4793]: I0127 20:03:01.864168 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:01 crc kubenswrapper[4793]: I0127 20:03:01.864291 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.361273 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.362424 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.362451 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:02 crc 
kubenswrapper[4793]: I0127 20:03:02.362460 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.362479 4793 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.661158 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.661615 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.662773 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.662808 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.662821 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.668999 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.746849 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 13:31:11.381419365 +0000 UTC Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.837470 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.866451 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.866486 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.867605 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.867641 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.867652 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.867646 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.867793 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:02 crc kubenswrapper[4793]: I0127 20:03:02.867820 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:03 crc kubenswrapper[4793]: I0127 20:03:03.124606 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:03:03 crc kubenswrapper[4793]: I0127 20:03:03.124801 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:03 crc kubenswrapper[4793]: I0127 
20:03:03.126123 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:03 crc kubenswrapper[4793]: I0127 20:03:03.126197 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:03 crc kubenswrapper[4793]: I0127 20:03:03.126212 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:03 crc kubenswrapper[4793]: I0127 20:03:03.747471 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 09:40:56.198083221 +0000 UTC Jan 27 20:03:03 crc kubenswrapper[4793]: I0127 20:03:03.882205 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:03:03 crc kubenswrapper[4793]: I0127 20:03:03.882366 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:03 crc kubenswrapper[4793]: I0127 20:03:03.883808 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:03 crc kubenswrapper[4793]: I0127 20:03:03.883847 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:03 crc kubenswrapper[4793]: I0127 20:03:03.883861 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.486923 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.487206 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.488721 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.488767 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.488776 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.619696 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.619969 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.621893 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.621945 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.621963 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.745238 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:03:04 crc 
kubenswrapper[4793]: I0127 20:03:04.748538 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 12:04:12.385873117 +0000 UTC Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.874976 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.876008 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.876068 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.876090 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.994355 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.994527 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.995772 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.995804 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:04 crc kubenswrapper[4793]: I0127 20:03:04.995815 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:05 crc kubenswrapper[4793]: I0127 20:03:05.749069 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 11:18:40.839523792 +0000 UTC Jan 27 20:03:06 crc kubenswrapper[4793]: E0127 20:03:06.055738 4793 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 27 20:03:06 crc kubenswrapper[4793]: I0127 20:03:06.543055 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:03:06 crc kubenswrapper[4793]: I0127 20:03:06.543225 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:06 crc kubenswrapper[4793]: I0127 20:03:06.544348 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:06 crc kubenswrapper[4793]: I0127 20:03:06.544384 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:06 crc kubenswrapper[4793]: I0127 20:03:06.544393 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:06 crc kubenswrapper[4793]: I0127 20:03:06.749705 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 07:01:57.396222444 +0000 UTC Jan 27 20:03:07 crc kubenswrapper[4793]: I0127 20:03:07.750858 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation 
deadline is 2025-12-27 09:05:36.498449908 +0000 UTC Jan 27 20:03:07 crc kubenswrapper[4793]: I0127 20:03:07.995353 4793 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 20:03:07 crc kubenswrapper[4793]: I0127 20:03:07.995490 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:03:08 crc kubenswrapper[4793]: I0127 20:03:08.751730 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 00:07:30.576616917 +0000 UTC Jan 27 20:03:08 crc kubenswrapper[4793]: I0127 20:03:08.778053 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 27 20:03:08 crc kubenswrapper[4793]: I0127 20:03:08.778232 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:08 crc kubenswrapper[4793]: I0127 20:03:08.830013 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:08 crc kubenswrapper[4793]: I0127 20:03:08.830107 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:08 crc kubenswrapper[4793]: I0127 20:03:08.830123 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:09 crc kubenswrapper[4793]: I0127 20:03:09.780217 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 16:22:39.617299704 +0000 UTC Jan 27 20:03:10 crc kubenswrapper[4793]: I0127 20:03:10.734755 4793 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Jan 27 20:03:10 crc kubenswrapper[4793]: I0127 20:03:10.780351 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 14:44:46.232919157 +0000 UTC Jan 27 20:03:10 crc kubenswrapper[4793]: I0127 20:03:10.889189 4793 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 27 20:03:10 crc kubenswrapper[4793]: I0127 20:03:10.889246 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 27 20:03:10 crc kubenswrapper[4793]: I0127 
20:03:10.893207 4793 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 27 20:03:10 crc kubenswrapper[4793]: I0127 20:03:10.893261 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 27 20:03:10 crc kubenswrapper[4793]: I0127 20:03:10.903784 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 27 20:03:10 crc kubenswrapper[4793]: I0127 20:03:10.905523 4793 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="dc4399f4d91a9ce597c713fd26c9d1205279ecf5650f9955f9986fb6131faae4" exitCode=255 Jan 27 20:03:10 crc kubenswrapper[4793]: I0127 20:03:10.905583 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"dc4399f4d91a9ce597c713fd26c9d1205279ecf5650f9955f9986fb6131faae4"} Jan 27 20:03:10 crc kubenswrapper[4793]: I0127 20:03:10.905745 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:10 crc kubenswrapper[4793]: I0127 20:03:10.906457 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:10 crc kubenswrapper[4793]: I0127 20:03:10.906486 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:10 crc kubenswrapper[4793]: I0127 20:03:10.906495 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:10 crc kubenswrapper[4793]: I0127 20:03:10.906954 4793 scope.go:117] "RemoveContainer" containerID="dc4399f4d91a9ce597c713fd26c9d1205279ecf5650f9955f9986fb6131faae4" Jan 27 20:03:11 crc kubenswrapper[4793]: I0127 20:03:11.780994 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 03:22:47.575006014 +0000 UTC Jan 27 20:03:11 crc kubenswrapper[4793]: I0127 20:03:11.910081 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 27 20:03:11 crc kubenswrapper[4793]: I0127 20:03:11.912671 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da"} Jan 27 20:03:11 crc kubenswrapper[4793]: I0127 20:03:11.912861 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:11 crc kubenswrapper[4793]: I0127 20:03:11.913739 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:11 
crc kubenswrapper[4793]: I0127 20:03:11.913773 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:11 crc kubenswrapper[4793]: I0127 20:03:11.913783 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:12 crc kubenswrapper[4793]: I0127 20:03:12.781347 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 08:39:54.223149401 +0000 UTC Jan 27 20:03:13 crc kubenswrapper[4793]: I0127 20:03:13.125400 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:03:13 crc kubenswrapper[4793]: I0127 20:03:13.125731 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:13 crc kubenswrapper[4793]: I0127 20:03:13.127297 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:13 crc kubenswrapper[4793]: I0127 20:03:13.127385 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:13 crc kubenswrapper[4793]: I0127 20:03:13.127415 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:13 crc kubenswrapper[4793]: I0127 20:03:13.781854 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 13:22:21.506925193 +0000 UTC Jan 27 20:03:13 crc kubenswrapper[4793]: I0127 20:03:13.888917 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:03:13 crc kubenswrapper[4793]: I0127 20:03:13.889169 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:13 crc kubenswrapper[4793]: I0127 20:03:13.891265 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:13 crc kubenswrapper[4793]: I0127 20:03:13.891326 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:13 crc kubenswrapper[4793]: I0127 20:03:13.891347 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:14 crc kubenswrapper[4793]: I0127 20:03:14.749781 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:03:14 crc kubenswrapper[4793]: I0127 20:03:14.749991 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:14 crc kubenswrapper[4793]: I0127 20:03:14.751033 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:14 crc kubenswrapper[4793]: I0127 20:03:14.751075 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:14 crc kubenswrapper[4793]: I0127 20:03:14.751092 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:14 crc kubenswrapper[4793]: I0127 20:03:14.756094 4793 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:03:14 crc kubenswrapper[4793]: I0127 20:03:14.782791 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 17:22:06.58579127 +0000 UTC Jan 27 20:03:14 crc kubenswrapper[4793]: I0127 20:03:14.920205 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:14 crc kubenswrapper[4793]: I0127 20:03:14.921023 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:14 crc kubenswrapper[4793]: I0127 20:03:14.921057 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:14 crc kubenswrapper[4793]: I0127 20:03:14.921069 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:15 crc kubenswrapper[4793]: I0127 20:03:15.783530 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 09:42:03.716471938 +0000 UTC Jan 27 20:03:15 crc kubenswrapper[4793]: E0127 20:03:15.881697 4793 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Jan 27 20:03:15 crc kubenswrapper[4793]: I0127 20:03:15.886111 4793 trace.go:236] Trace[1986541879]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (27-Jan-2026 20:03:03.132) (total time: 12753ms): Jan 27 20:03:15 crc kubenswrapper[4793]: Trace[1986541879]: ---"Objects listed" error: 12753ms (20:03:15.886) Jan 27 20:03:15 crc kubenswrapper[4793]: Trace[1986541879]: [12.753714241s] [12.753714241s] END Jan 27 20:03:15 crc kubenswrapper[4793]: I0127 20:03:15.886164 4793 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 27 20:03:15 crc kubenswrapper[4793]: I0127 20:03:15.889842 4793 trace.go:236] Trace[32694396]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (27-Jan-2026 20:03:03.444) (total time: 12445ms): Jan 27 20:03:15 crc kubenswrapper[4793]: Trace[32694396]: ---"Objects listed" error: 12445ms (20:03:15.889) Jan 27 20:03:15 crc kubenswrapper[4793]: Trace[32694396]: [12.445489773s] [12.445489773s] END Jan 27 20:03:15 crc kubenswrapper[4793]: I0127 20:03:15.889881 4793 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 27 20:03:15 crc kubenswrapper[4793]: E0127 20:03:15.890576 4793 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Jan 27 20:03:15 crc kubenswrapper[4793]: I0127 20:03:15.892016 4793 trace.go:236] Trace[1539481795]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (27-Jan-2026 20:03:03.524) (total time: 12367ms): Jan 27 20:03:15 crc kubenswrapper[4793]: Trace[1539481795]: ---"Objects listed" error: 12367ms (20:03:15.891) Jan 27 20:03:15 crc kubenswrapper[4793]: Trace[1539481795]: [12.367919294s] [12.367919294s] END Jan 27 20:03:15 crc kubenswrapper[4793]: I0127 20:03:15.892058 4793 reflector.go:368] Caches populated for *v1.Node from 
Jan 27 20:03:15 crc kubenswrapper[4793]: I0127 20:03:15.892058 4793 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Jan 27 20:03:15 crc kubenswrapper[4793]: I0127 20:03:15.895824 4793 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Jan 27 20:03:15 crc kubenswrapper[4793]: I0127 20:03:15.901014 4793 trace.go:236] Trace[780417916]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (27-Jan-2026 20:03:04.600) (total time: 11300ms):
Jan 27 20:03:15 crc kubenswrapper[4793]: Trace[780417916]: ---"Objects listed" error: 11299ms (20:03:15.900)
Jan 27 20:03:15 crc kubenswrapper[4793]: Trace[780417916]: [11.300036916s] [11.300036916s] END
Jan 27 20:03:15 crc kubenswrapper[4793]: I0127 20:03:15.901038 4793 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Jan 27 20:03:15 crc kubenswrapper[4793]: I0127 20:03:15.910757 4793 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Jan 27 20:03:15 crc kubenswrapper[4793]: I0127 20:03:15.934109 4793 csr.go:261] certificate signing request csr-9xvzl is approved, waiting to be issued
Jan 27 20:03:15 crc kubenswrapper[4793]: I0127 20:03:15.942436 4793 csr.go:257] certificate signing request csr-9xvzl is issued
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.604658 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.609789 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.709146 4793 apiserver.go:52] "Watching apiserver"
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.711101 4793 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.711415 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-dns/node-resolver-mpxz5","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf"]
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.711711 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.711860 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.711917 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
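Editor's note: the csr-9xvzl lines a few entries up show the two-phase kubelet-serving certificate flow: the CSR is first approved (an Approved condition appears on status) and only later issued (the signer attaches status.certificate). A small client-go sketch that distinguishes those states, assuming an existing clientset; only the CSR name comes from the log.

package csrstate

import (
	"context"

	certificatesv1 "k8s.io/api/certificates/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// csrState reports whether a CSR (e.g. "csr-9xvzl") is pending, approved
// but not yet signed, or fully issued.
func csrState(ctx context.Context, cs kubernetes.Interface, name string) (string, error) {
	csr, err := cs.CertificatesV1().CertificateSigningRequests().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	approved := false
	for _, c := range csr.Status.Conditions {
		if c.Type == certificatesv1.CertificateApproved {
			approved = true
		}
	}
	switch {
	case approved && len(csr.Status.Certificate) > 0:
		return "issued", nil // signer has attached the signed certificate
	case approved:
		return "approved, waiting to be issued", nil
	default:
		return "pending", nil
	}
}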
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.711926 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 27 20:03:16 crc kubenswrapper[4793]: E0127 20:03:16.711958 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.712120 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 27 20:03:16 crc kubenswrapper[4793]: E0127 20:03:16.712122 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.712966 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-mpxz5"
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.713428 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
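Editor's note: the repeated "no CNI configuration file in /etc/kubernetes/cni/net.d/" errors above are a readiness gate that amounts to scanning that directory for a network configuration; until one appears, pods that need the cluster network are skipped. A hypothetical check along those lines; the directory comes from the log, while the extension list follows common CNI conventions and is an assumption, not the runtime's exact rule.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// cniConfPresent reports whether any plausible CNI config file exists in dir.
func cniConfPresent(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch strings.ToLower(filepath.Ext(e.Name())) {
		case ".conf", ".conflist", ".json": // assumed extension set
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := cniConfPresent("/etc/kubernetes/cni/net.d")
	// false => NetworkReady stays false and pod sync is skipped, as logged above.
	fmt.Println(ok, err)
}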
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.727500 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.728223 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.728383 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.728893 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.728949 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.730036 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.730183 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.732099 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.732304 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.732330 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.735864 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.735866 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.743349 4793 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.747081 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.757392 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.765017 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.773259 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.781058 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.783808 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 12:04:53.293298231 +0000 UTC Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.789026 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.798247 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802039 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802080 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802099 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802115 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802130 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802145 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802161 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
\"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802194 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802216 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802233 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802249 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802264 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802278 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802293 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802310 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802325 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802343 4793 
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802343 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802357 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802370 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802385 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802399 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802416 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802458 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802475 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802489 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802503 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802517 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802532 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802564 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802584 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802600 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802616 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802646 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802663 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802678 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802693 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802707 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802721 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802745 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802763 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802777 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802793 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802807 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802822 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802836 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802852 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802871 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802885 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802905 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802919 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802934 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802949 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802964 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802979 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.802993 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803008 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803022 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803035 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803049 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803063 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803077 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803092 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803106 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803120 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803136 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803156 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803173 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803190 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803205 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803220 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803234 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803250 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803267 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803283 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803297 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803312 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803328 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803330 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803342 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803395 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803420 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803445 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803467 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803523 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
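Editor's note: the interleaved "UnmountVolume started" and "UnmountVolume.TearDown succeeded" pairs in this stretch are the volume manager's reconciler draining mounts that are no longer in the desired state of world. A purely conceptual sketch of that compare-and-tear-down loop; the types and function names here are invented for illustration and are not kubelet's internal API.

package main

import "fmt"

// volumeKey identifies a mounted volume by pod UID and volume name.
type volumeKey struct{ podUID, volName string }

// reconcile unmounts every actually-mounted volume that is absent from the
// desired set, mirroring the paired log lines above: "started", then either
// "TearDown succeeded" or an error followed by a retry on the next pass.
func reconcile(desired, actual map[volumeKey]bool, tearDown func(volumeKey) error) {
	for v := range actual {
		if desired[v] {
			continue // still wanted; leave mounted
		}
		fmt.Printf("UnmountVolume started for %v\n", v)
		if err := tearDown(v); err != nil {
			fmt.Printf("TearDown failed for %v: %v (will retry)\n", v, err)
			continue
		}
		fmt.Printf("TearDown succeeded for %v\n", v)
	}
}

func main() {
	// Example: one leftover mount from a deleted pod, empty desired state.
	actual := map[volumeKey]bool{{"6ea678ab", "ovn-node-metrics-cert"}: true}
	reconcile(map[volumeKey]bool{}, actual, func(volumeKey) error { return nil })
}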
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803562 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803587 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803615 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803640 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803663 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803684 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803709 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803731 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803767 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803790 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: 
\"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803811 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803834 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803855 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803897 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803920 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803944 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803969 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.803991 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804013 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804036 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" 
(UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804059 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804080 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804106 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804128 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804152 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804174 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804198 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804221 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804244 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804246 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804265 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804290 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804313 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804337 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804360 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804370 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804382 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804406 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804430 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804453 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804478 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804501 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804523 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804561 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804588 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804611 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804633 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804658 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804683 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804706 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804728 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804752 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804774 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804895 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804921 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804941 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804966 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804992 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805015 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805037 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805068 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805093 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805117 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805139 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805162 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: 
\"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805186 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805216 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805240 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805264 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805287 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805310 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805334 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805359 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805384 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805409 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805433 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805455 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805481 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805505 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805531 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805607 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805680 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805714 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805740 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805767 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805793 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805840 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805868 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805892 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805940 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806034 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806082 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806246 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806280 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806308 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: 
\"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806339 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806364 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806390 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806414 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806438 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806465 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806490 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806514 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806538 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806581 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" 
(UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806611 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806637 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806661 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806685 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806711 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806736 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806762 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806788 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806812 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806859 4793 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806891 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806939 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806996 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807031 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807059 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807087 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807117 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807147 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod 
\"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807172 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807200 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/8338625a-5d99-48c1-a7ff-d4542b624045-hosts-file\") pod \"node-resolver-mpxz5\" (UID: \"8338625a-5d99-48c1-a7ff-d4542b624045\") " pod="openshift-dns/node-resolver-mpxz5" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807228 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807253 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mds8v\" (UniqueName: \"kubernetes.io/projected/8338625a-5d99-48c1-a7ff-d4542b624045-kube-api-access-mds8v\") pod \"node-resolver-mpxz5\" (UID: \"8338625a-5d99-48c1-a7ff-d4542b624045\") " pod="openshift-dns/node-resolver-mpxz5" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807279 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807302 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807327 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807377 4793 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807393 4793 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807434 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807450 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807486 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.810225 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 
20:03:16.820508 4793 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory"
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.821343 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.804922 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805165 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805173 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805391 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805454 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805396 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805752 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805807 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.805989 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806073 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806238 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806443 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806678 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806761 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.831864 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806943 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807396 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807728 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.807766 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.808200 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.808329 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.808581 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.808617 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.808645 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.809104 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.809147 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.809241 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.809302 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.809503 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.809530 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.809358 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.809636 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.809865 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.809935 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.810012 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.810250 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.810375 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.810412 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.810413 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.810672 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.810790 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.810946 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.810963 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.810983 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.811194 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.811263 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.811595 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.811722 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.811733 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.811940 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.812971 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.813490 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.832282 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.813670 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.815401 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.816121 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.816384 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.816611 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.816971 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.817482 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.817713 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.818499 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.818608 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.818642 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.819027 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.819052 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.819351 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.819394 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.819826 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.819934 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.819964 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.820525 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.820714 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.821040 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.821081 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.821685 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.821809 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.821891 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.822075 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: E0127 20:03:16.822232 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:03:17.32221244 +0000 UTC m=+22.712465586 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.822538 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.825888 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.826097 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.826315 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.826473 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.827372 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.827332 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.827525 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.827752 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.828669 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.828722 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.829029 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.829167 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.829664 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.829977 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.806905 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.832359 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.832362 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.832931 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.834879 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.835171 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.835393 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.835573 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.835672 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.835750 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.835979 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.835993 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.836068 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.836155 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.836175 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.836183 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.836269 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.836570 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.836732 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.836874 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.836999 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.837024 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.837106 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.837125 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.837236 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.837379 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.837435 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.837462 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.837646 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.837672 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.837774 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.837823 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.838165 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.838203 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.838261 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.838369 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.838467 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.838750 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: E0127 20:03:16.838769 4793 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.839105 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.839365 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.839399 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.839488 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.838818 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.840795 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: E0127 20:03:16.841315 4793 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.841445 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.841618 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.841831 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 27 20:03:16 crc kubenswrapper[4793]: E0127 20:03:16.843781 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:17.343755438 +0000 UTC m=+22.734008594 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 27 20:03:16 crc kubenswrapper[4793]: E0127 20:03:16.843815 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:17.343806979 +0000 UTC m=+22.734060135 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.844782 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.845055 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.845087 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.845199 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.845423 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.845411 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.845648 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.845801 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.846659 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.855808 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.856702 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.856772 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). 
InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.858055 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.868447 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: E0127 20:03:16.868611 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 20:03:16 crc kubenswrapper[4793]: E0127 20:03:16.868707 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 20:03:16 crc kubenswrapper[4793]: E0127 20:03:16.868790 4793 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:16 crc kubenswrapper[4793]: E0127 20:03:16.868942 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:17.368917806 +0000 UTC m=+22.759170962 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.869038 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.868665 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.869257 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.868736 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.869487 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.869343 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.869539 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.869785 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.869817 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.870124 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.870686 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.870202 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 27 20:03:16 crc kubenswrapper[4793]: E0127 20:03:16.871047 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 20:03:16 crc kubenswrapper[4793]: E0127 20:03:16.871072 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 20:03:16 crc kubenswrapper[4793]: E0127 20:03:16.871088 4793 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.871239 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:16 crc kubenswrapper[4793]: E0127 20:03:16.871464 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:17.371446788 +0000 UTC m=+22.761699944 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.871818 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.874929 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.875099 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.875656 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.875826 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.875869 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.876094 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.876149 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.875700 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.877752 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.877928 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.878026 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.878177 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.878201 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.878416 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.878418 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.878425 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.878579 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.878661 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.878687 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.878720 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.878729 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.878813 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.879466 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.879927 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.880642 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.892164 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.895833 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.901573 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908666 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/8338625a-5d99-48c1-a7ff-d4542b624045-hosts-file\") pod \"node-resolver-mpxz5\" (UID: \"8338625a-5d99-48c1-a7ff-d4542b624045\") " pod="openshift-dns/node-resolver-mpxz5" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908725 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908742 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mds8v\" (UniqueName: \"kubernetes.io/projected/8338625a-5d99-48c1-a7ff-d4542b624045-kube-api-access-mds8v\") pod \"node-resolver-mpxz5\" (UID: \"8338625a-5d99-48c1-a7ff-d4542b624045\") " pod="openshift-dns/node-resolver-mpxz5" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908757 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908844 4793 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908855 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc 
kubenswrapper[4793]: I0127 20:03:16.908865 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908875 4793 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908883 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908892 4793 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908900 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908909 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908917 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908945 4793 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908954 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908963 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908971 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908979 4793 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908987 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" 
DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.908995 4793 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909003 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909011 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909019 4793 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909028 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909036 4793 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909044 4793 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909053 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909061 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909069 4793 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909077 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909086 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909079 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: 
\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909094 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909136 4793 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909151 4793 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909166 4793 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909179 4793 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909192 4793 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909205 4793 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909218 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909230 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909241 4793 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909254 4793 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909265 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909278 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909290 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909303 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909315 4793 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909327 4793 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909339 4793 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909351 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909363 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909378 4793 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909391 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909405 4793 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909417 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909427 4793 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909452 4793 reconciler_common.go:293] "Volume detached for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909463 4793 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909475 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909486 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909497 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909509 4793 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909521 4793 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909535 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909711 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909805 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/8338625a-5d99-48c1-a7ff-d4542b624045-hosts-file\") pod \"node-resolver-mpxz5\" (UID: \"8338625a-5d99-48c1-a7ff-d4542b624045\") " pod="openshift-dns/node-resolver-mpxz5" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909850 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909863 4793 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909872 4793 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909881 4793 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909889 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909906 4793 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909920 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909931 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909943 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909954 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909967 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909978 4793 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.909990 4793 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910001 4793 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910017 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910028 4793 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910039 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910051 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910063 4793 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910076 4793 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910088 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910103 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910114 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910127 4793 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910139 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910151 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910164 4793 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910175 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910187 4793 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910198 4793 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910209 4793 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910223 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910235 4793 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910246 4793 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910259 4793 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910270 4793 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910282 4793 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910293 4793 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910310 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910321 4793 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910333 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910345 4793 reconciler_common.go:293] 
"Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910356 4793 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910367 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910379 4793 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910388 4793 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910397 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910405 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910413 4793 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910421 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910429 4793 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910438 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910446 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910454 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910462 4793 reconciler_common.go:293] "Volume detached 
for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910471 4793 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910479 4793 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910489 4793 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910497 4793 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910506 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910514 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910522 4793 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910530 4793 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910538 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910566 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910577 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910588 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910599 4793 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910610 4793 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910620 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910634 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910646 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910658 4793 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910670 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910680 4793 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910689 4793 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910699 4793 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910709 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910721 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910732 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910742 4793 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: 
\"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910751 4793 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910761 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910773 4793 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910785 4793 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910796 4793 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910810 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910821 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910831 4793 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910854 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910863 4793 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910871 4793 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910880 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910888 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910896 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910905 4793 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910915 4793 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910926 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910937 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910948 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910959 4793 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910972 4793 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910984 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.910995 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911008 4793 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911020 4793 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911031 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" 
(UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911042 4793 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911055 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911066 4793 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911077 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911088 4793 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911100 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911111 4793 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911123 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911134 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911144 4793 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911152 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911161 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911170 4793 reconciler_common.go:293] "Volume detached for volume 
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911178 4793 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911186 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911194 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911203 4793 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911211 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911222 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911233 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.911244 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.914502 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.924324 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mds8v\" (UniqueName: \"kubernetes.io/projected/8338625a-5d99-48c1-a7ff-d4542b624045-kube-api-access-mds8v\") pod \"node-resolver-mpxz5\" (UID: \"8338625a-5d99-48c1-a7ff-d4542b624045\") " pod="openshift-dns/node-resolver-mpxz5" Jan 27 20:03:16 crc kubenswrapper[4793]: E0127 20:03:16.931019 4793 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.944080 4793 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-27 19:58:15 +0000 UTC, rotation deadline is 2026-11-15 15:20:14.408651234 +0000 UTC Jan 27 20:03:16 crc kubenswrapper[4793]: I0127 20:03:16.944135 4793 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7003h16m57.464518231s for next certificate rotation Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.011806 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.040872 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.047393 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-mpxz5" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.059334 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.066946 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.310291 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-gq8gn"] Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.310682 4793 util.go:30] "No sandbox for pod can be found. 
Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.313750 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.313816 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.313953 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.313825 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.316009 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.323685 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.334619 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.343840 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.357089 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.367227 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.374701 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.384148 4793 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.394679 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.405436 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.415733 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:03:17 crc kubenswrapper[4793]: E0127 20:03:17.415972 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:03:18.415938356 +0000 UTC m=+23.806191512 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.416201 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/bb16a16f-6f5f-4462-be09-372a8b10739a-rootfs\") pod \"machine-config-daemon-gq8gn\" (UID: \"bb16a16f-6f5f-4462-be09-372a8b10739a\") " pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.416347 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.416460 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:17 crc kubenswrapper[4793]: E0127 20:03:17.416524 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 20:03:17 crc kubenswrapper[4793]: E0127 20:03:17.416595 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 20:03:17 crc kubenswrapper[4793]: E0127 20:03:17.416610 4793 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:17 crc kubenswrapper[4793]: E0127 20:03:17.416632 4793 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.416789 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bb16a16f-6f5f-4462-be09-372a8b10739a-proxy-tls\") pod \"machine-config-daemon-gq8gn\" (UID: \"bb16a16f-6f5f-4462-be09-372a8b10739a\") " pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:03:17 crc kubenswrapper[4793]: E0127 20:03:17.416898 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2026-01-27 20:03:18.416859638 +0000 UTC m=+23.807112794 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:17 crc kubenswrapper[4793]: E0127 20:03:17.416955 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:18.41694468 +0000 UTC m=+23.807197936 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.416990 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zdrh\" (UniqueName: \"kubernetes.io/projected/bb16a16f-6f5f-4462-be09-372a8b10739a-kube-api-access-5zdrh\") pod \"machine-config-daemon-gq8gn\" (UID: \"bb16a16f-6f5f-4462-be09-372a8b10739a\") " pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.417035 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bb16a16f-6f5f-4462-be09-372a8b10739a-mcd-auth-proxy-config\") pod \"machine-config-daemon-gq8gn\" (UID: \"bb16a16f-6f5f-4462-be09-372a8b10739a\") " pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.417069 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.417101 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:17 crc kubenswrapper[4793]: E0127 20:03:17.417202 4793 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 20:03:17 crc kubenswrapper[4793]: E0127 20:03:17.417236 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 20:03:17 crc kubenswrapper[4793]: E0127 20:03:17.417254 4793 projected.go:288] 
Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 20:03:17 crc kubenswrapper[4793]: E0127 20:03:17.417267 4793 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:17 crc kubenswrapper[4793]: E0127 20:03:17.417243 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:18.417236318 +0000 UTC m=+23.807489474 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 20:03:17 crc kubenswrapper[4793]: E0127 20:03:17.417313 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:18.41730256 +0000 UTC m=+23.807555856 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.517798 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bb16a16f-6f5f-4462-be09-372a8b10739a-mcd-auth-proxy-config\") pod \"machine-config-daemon-gq8gn\" (UID: \"bb16a16f-6f5f-4462-be09-372a8b10739a\") " pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.517883 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/bb16a16f-6f5f-4462-be09-372a8b10739a-rootfs\") pod \"machine-config-daemon-gq8gn\" (UID: \"bb16a16f-6f5f-4462-be09-372a8b10739a\") " pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.517931 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bb16a16f-6f5f-4462-be09-372a8b10739a-proxy-tls\") pod \"machine-config-daemon-gq8gn\" (UID: \"bb16a16f-6f5f-4462-be09-372a8b10739a\") " pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.517957 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zdrh\" (UniqueName: 
\"kubernetes.io/projected/bb16a16f-6f5f-4462-be09-372a8b10739a-kube-api-access-5zdrh\") pod \"machine-config-daemon-gq8gn\" (UID: \"bb16a16f-6f5f-4462-be09-372a8b10739a\") " pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.518301 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/bb16a16f-6f5f-4462-be09-372a8b10739a-rootfs\") pod \"machine-config-daemon-gq8gn\" (UID: \"bb16a16f-6f5f-4462-be09-372a8b10739a\") " pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.518659 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bb16a16f-6f5f-4462-be09-372a8b10739a-mcd-auth-proxy-config\") pod \"machine-config-daemon-gq8gn\" (UID: \"bb16a16f-6f5f-4462-be09-372a8b10739a\") " pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.523261 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bb16a16f-6f5f-4462-be09-372a8b10739a-proxy-tls\") pod \"machine-config-daemon-gq8gn\" (UID: \"bb16a16f-6f5f-4462-be09-372a8b10739a\") " pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.540149 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zdrh\" (UniqueName: \"kubernetes.io/projected/bb16a16f-6f5f-4462-be09-372a8b10739a-kube-api-access-5zdrh\") pod \"machine-config-daemon-gq8gn\" (UID: \"bb16a16f-6f5f-4462-be09-372a8b10739a\") " pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.626775 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:03:17 crc kubenswrapper[4793]: W0127 20:03:17.641337 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbb16a16f_6f5f_4462_be09_372a8b10739a.slice/crio-1b5079413ad31c697864efec5783cee3e25faa96abe056ee80dd71e921058bd2 WatchSource:0}: Error finding container 1b5079413ad31c697864efec5783cee3e25faa96abe056ee80dd71e921058bd2: Status 404 returned error can't find the container with id 1b5079413ad31c697864efec5783cee3e25faa96abe056ee80dd71e921058bd2 Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.676414 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-8glmz"] Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.677558 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-7k9v7"] Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.677756 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.678095 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.679286 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-fgp7j"] Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.679928 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.681257 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.681322 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.681277 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.681631 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.681806 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.681917 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.681924 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.682139 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.682524 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.682591 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.682861 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.684908 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.688253 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.688259 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.697959 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.728092 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.744414 4793 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.756980 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.768194 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.775538 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.785896 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 09:31:50.852327472 +0000 UTC Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.788028 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.805956 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.807656 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.808161 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.809458 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.810301 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.810887 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.811902 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.812462 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.813566 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.814146 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: 
I0127 20:03:17.815034 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.815481 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.816169 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.817015 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.817482 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.818343 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.818880 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819423 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-cni-binary-copy\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819472 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-systemd-units\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819497 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/fe475131-3b65-45aa-a877-190a8bdec86f-system-cni-dir\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819521 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819566 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/fe475131-3b65-45aa-a877-190a8bdec86f-cni-binary-copy\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819590 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-etc-openvswitch\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819614 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-cni-bin\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819635 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/fe475131-3b65-45aa-a877-190a8bdec86f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819661 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-run-k8s-cni-cncf-io\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819683 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-var-lib-cni-bin\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819722 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-run-netns\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819742 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-kubelet\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819766 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-ovn\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819791 4793 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-multus-cni-dir\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819810 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-multus-socket-dir-parent\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819832 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-run-ovn-kubernetes\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819853 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-os-release\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819945 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819943 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-log-socket\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.819990 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-var-lib-openvswitch\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820010 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovnkube-config\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820028 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-var-lib-cni-multus\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820044 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" 
(UniqueName: \"kubernetes.io/host-path/fe475131-3b65-45aa-a877-190a8bdec86f-os-release\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820063 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kccqx\" (UniqueName: \"kubernetes.io/projected/fe475131-3b65-45aa-a877-190a8bdec86f-kube-api-access-kccqx\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820082 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/fe475131-3b65-45aa-a877-190a8bdec86f-cnibin\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820098 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/fe475131-3b65-45aa-a877-190a8bdec86f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820126 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-cnibin\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820141 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-slash\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820156 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-env-overrides\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820189 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpdjd\" (UniqueName: \"kubernetes.io/projected/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-kube-api-access-rpdjd\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820205 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-var-lib-kubelet\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 
20:03:17.820223 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-node-log\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820238 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovn-node-metrics-cert\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820257 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-etc-kubernetes\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820277 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmlxm\" (UniqueName: \"kubernetes.io/projected/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-kube-api-access-mmlxm\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820294 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovnkube-script-lib\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820311 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-system-cni-dir\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820317 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820328 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-multus-conf-dir\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820344 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-multus-daemon-config\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820389 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" 
(UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-run-multus-certs\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820405 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-cni-netd\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820447 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-hostroot\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820461 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-run-netns\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820475 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-systemd\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820489 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-openvswitch\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.820929 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.822906 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.823241 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.823802 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.825173 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.825889 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.826883 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.827258 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.828306 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.829010 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.829900 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" 
path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.830431 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.830928 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.831721 4793 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.831816 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.833487 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.834065 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.834913 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.835349 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.837527 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.838816 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.839360 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.840587 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.841695 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.842302 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.842948 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.843508 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.844843 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.845496 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.846474 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.847005 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.847882 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.848842 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.849348 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.850187 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.850735 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 27 20:03:17 crc 
kubenswrapper[4793]: I0127 20:03:17.851234 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.852513 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.853084 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.853591 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca4
5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.863349 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.873381 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.887231 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.897343 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.905455 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.912602 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.921788 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-var-lib-openvswitch\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.921835 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovnkube-config\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.921861 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-var-lib-cni-multus\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.921888 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/fe475131-3b65-45aa-a877-190a8bdec86f-os-release\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.921911 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kccqx\" (UniqueName: \"kubernetes.io/projected/fe475131-3b65-45aa-a877-190a8bdec86f-kube-api-access-kccqx\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" 
Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.921933 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-cnibin\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.921933 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-var-lib-openvswitch\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.921956 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-slash\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.922619 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-env-overrides\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.921998 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-slash\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.922708 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovnkube-config\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.922039 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.922422 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/fe475131-3b65-45aa-a877-190a8bdec86f-os-release\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.922044 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-cnibin\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.922467 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-var-lib-cni-multus\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.923466 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-env-overrides\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.923621 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpdjd\" (UniqueName: \"kubernetes.io/projected/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-kube-api-access-rpdjd\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.923992 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: 
\"kubernetes.io/host-path/fe475131-3b65-45aa-a877-190a8bdec86f-cnibin\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.924147 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/fe475131-3b65-45aa-a877-190a8bdec86f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.924076 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/fe475131-3b65-45aa-a877-190a8bdec86f-cnibin\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.924251 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-var-lib-kubelet\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.924430 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-node-log\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.924607 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovn-node-metrics-cert\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.924367 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-var-lib-kubelet\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.924744 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/fe475131-3b65-45aa-a877-190a8bdec86f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.924527 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-node-log\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.925530 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-etc-kubernetes\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.925604 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-etc-kubernetes\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.925619 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmlxm\" (UniqueName: \"kubernetes.io/projected/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-kube-api-access-mmlxm\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.925654 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-system-cni-dir\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.925684 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-multus-conf-dir\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.925739 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-multus-daemon-config\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.925769 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-system-cni-dir\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.925791 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-multus-conf-dir\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.925775 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-run-multus-certs\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.925840 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovnkube-script-lib\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: 
I0127 20:03:17.925816 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-run-multus-certs\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.925910 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-hostroot\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.925934 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-run-netns\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.925959 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-systemd\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.925979 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-openvswitch\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.925998 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-cni-netd\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926020 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-cni-binary-copy\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926041 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-systemd-units\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926062 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/fe475131-3b65-45aa-a877-190a8bdec86f-system-cni-dir\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926087 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-etc-openvswitch\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926106 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-cni-bin\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926134 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926158 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/fe475131-3b65-45aa-a877-190a8bdec86f-cni-binary-copy\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926179 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/fe475131-3b65-45aa-a877-190a8bdec86f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926206 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-run-k8s-cni-cncf-io\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926233 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-var-lib-cni-bin\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926278 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-run-netns\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926302 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-multus-cni-dir\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926323 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: 
\"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-multus-socket-dir-parent\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926344 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-kubelet\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926367 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-ovn\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926387 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-run-ovn-kubernetes\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926405 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-os-release\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926425 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-log-socket\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926486 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-log-socket\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926516 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-multus-daemon-config\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926522 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-hostroot\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926562 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-run-netns\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 
crc kubenswrapper[4793]: I0127 20:03:17.926585 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovnkube-script-lib\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926593 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-systemd\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926615 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-openvswitch\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926637 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-cni-netd\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926672 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-var-lib-cni-bin\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926674 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-run-k8s-cni-cncf-io\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926696 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-cni-bin\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926724 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-host-run-netns\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926716 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-etc-openvswitch\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926749 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926787 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-systemd-units\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.926949 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-multus-cni-dir\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.927001 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-multus-socket-dir-parent\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.927034 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-kubelet\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.927072 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-ovn\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.927103 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-run-ovn-kubernetes\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.927171 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-os-release\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.927305 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/fe475131-3b65-45aa-a877-190a8bdec86f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.927366 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-cni-binary-copy\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " 
pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.927365 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/fe475131-3b65-45aa-a877-190a8bdec86f-system-cni-dir\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.927616 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/fe475131-3b65-45aa-a877-190a8bdec86f-cni-binary-copy\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.928792 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-mpxz5" event={"ID":"8338625a-5d99-48c1-a7ff-d4542b624045","Type":"ContainerStarted","Data":"2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250"} Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.928822 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-mpxz5" event={"ID":"8338625a-5d99-48c1-a7ff-d4542b624045","Type":"ContainerStarted","Data":"2beb87b566bb7733b649e3fc8cc19535e4a3be2b51eadaebaf1cec37b1ca1eb5"} Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.930451 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0"} Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.930492 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"ac9cc3a3c5ac98d688ee916626d5faaa79b751e2719f10d30b03ffc0410c3dd4"} Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.932634 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.933189 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.935171 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.935997 4793 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da" exitCode=255 Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.936129 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da"} Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.936189 4793 scope.go:117] "RemoveContainer" containerID="dc4399f4d91a9ce597c713fd26c9d1205279ecf5650f9955f9986fb6131faae4" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.937296 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovn-node-metrics-cert\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.938770 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602"} Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.938824 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0"} Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.938839 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"fb64a803f9be901ecd6c7d05e60e5374f2d9db672ed3596b55778d86fd68c0ec"} Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.942429 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050"} Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.942479 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b"} Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.942494 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"1b5079413ad31c697864efec5783cee3e25faa96abe056ee80dd71e921058bd2"} Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.948784 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"578528d400aa9aad050fa8c365b0c97ba6d8f87e8ae767da32a43baa2c215f38"} Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.949251 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kccqx\" (UniqueName: \"kubernetes.io/projected/fe475131-3b65-45aa-a877-190a8bdec86f-kube-api-access-kccqx\") pod \"multus-additional-cni-plugins-fgp7j\" (UID: \"fe475131-3b65-45aa-a877-190a8bdec86f\") " pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:17 crc 
kubenswrapper[4793]: I0127 20:03:17.951623 4793 scope.go:117] "RemoveContainer" containerID="c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.951756 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmlxm\" (UniqueName: \"kubernetes.io/projected/d3e7b749-a397-4db6-8b6e-ddde6b3fdced-kube-api-access-mmlxm\") pod \"multus-7k9v7\" (UID: \"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\") " pod="openshift-multus/multus-7k9v7" Jan 27 20:03:17 crc kubenswrapper[4793]: E0127 20:03:17.951767 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.951891 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.952339 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:17Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.961442 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rpdjd\" (UniqueName: \"kubernetes.io/projected/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-kube-api-access-rpdjd\") pod \"ovnkube-node-8glmz\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.964509 
4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:17Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.977241 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:17Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.987941 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:17Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:17 crc kubenswrapper[4793]: I0127 20:03:17.992762 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.002310 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:18Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:18 crc kubenswrapper[4793]: W0127 20:03:18.003478 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4fb300f8_bf40_4c4e_a3e5_4d5149177aae.slice/crio-18bc0fc7af540440b2e36178c14dd860d734ce17106f76ee7c845a89fb069dfd WatchSource:0}: Error finding container 18bc0fc7af540440b2e36178c14dd860d734ce17106f76ee7c845a89fb069dfd: Status 404 returned error can't find the container with id 18bc0fc7af540440b2e36178c14dd860d734ce17106f76ee7c845a89fb069dfd Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.014393 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:18Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.018738 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-7k9v7" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.023921 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.030893 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:18Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.043300 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:18Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:18 crc kubenswrapper[4793]: W0127 20:03:18.047345 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd3e7b749_a397_4db6_8b6e_ddde6b3fdced.slice/crio-fcd935545fb182c5f08c009fe852ed29e65e50a9ae959559d9883bf799a81c69 WatchSource:0}: Error finding container fcd935545fb182c5f08c009fe852ed29e65e50a9ae959559d9883bf799a81c69: Status 404 returned error can't find the container with id fcd935545fb182c5f08c009fe852ed29e65e50a9ae959559d9883bf799a81c69 Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.058393 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:18Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.092447 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc4399f4d91a9ce597c713fd26c9d1205279ecf5650f9955f9986fb6131faae4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:09Z\\\",\\\"message\\\":\\\"W0127 20:02:59.197818 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0127 20:02:59.198205 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769544179 cert, and key in /tmp/serving-cert-819727762/serving-signer.crt, /tmp/serving-cert-819727762/serving-signer.key\\\\nI0127 20:02:59.622901 1 observer_polling.go:159] Starting file observer\\\\nW0127 20:02:59.627211 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0127 20:02:59.627348 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:02:59.629352 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-819727762/tls.crt::/tmp/serving-cert-819727762/tls.key\\\\\\\"\\\\nF0127 20:03:09.931853 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating 
requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:18Z is after 2025-08-24T17:21:41Z" Jan 27 
20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.108162 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:18Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.159479 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:18Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.168537 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:18Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.176386 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:18Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.186590 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:18Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.438153 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.438275 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.438302 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.438368 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:03:20.438340378 +0000 UTC m=+25.828593534 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.438389 4793 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.438443 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.438470 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.438583 4793 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.438606 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.438658 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object 
"openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.438679 4793 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.438685 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.438698 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.438710 4793 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.438617 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:20.438609014 +0000 UTC m=+25.828862170 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.438743 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:20.438737087 +0000 UTC m=+25.828990243 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.438756 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:20.438749787 +0000 UTC m=+25.829002943 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.438770 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:20.438762838 +0000 UTC m=+25.829015994 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.786176 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 22:11:07.038858935 +0000 UTC Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.803972 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.804290 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.805820 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.805987 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.806127 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.806389 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.886903 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.907810 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.914942 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b8279
9488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:18Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.917400 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.930454 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:18Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.941866 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:18Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.970581 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.973133 4793 scope.go:117] "RemoveContainer" containerID="c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da" Jan 27 20:03:18 crc kubenswrapper[4793]: E0127 20:03:18.973309 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.975039 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-7k9v7" event={"ID":"d3e7b749-a397-4db6-8b6e-ddde6b3fdced","Type":"ContainerStarted","Data":"d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b"} Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.975067 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-7k9v7" event={"ID":"d3e7b749-a397-4db6-8b6e-ddde6b3fdced","Type":"ContainerStarted","Data":"fcd935545fb182c5f08c009fe852ed29e65e50a9ae959559d9883bf799a81c69"} Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.976867 4793 generic.go:334] "Generic (PLEG): container finished" podID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerID="8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d" exitCode=0 Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.976913 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerDied","Data":"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d"} Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.976929 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerStarted","Data":"18bc0fc7af540440b2e36178c14dd860d734ce17106f76ee7c845a89fb069dfd"} Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.978504 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:18Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.979372 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" event={"ID":"fe475131-3b65-45aa-a877-190a8bdec86f","Type":"ContainerStarted","Data":"139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54"} Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.979415 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" event={"ID":"fe475131-3b65-45aa-a877-190a8bdec86f","Type":"ContainerStarted","Data":"c2aa926540a70247bb08a7207716d0df3654b42c6f83d170edf89348f4c3368a"} Jan 27 20:03:18 crc kubenswrapper[4793]: I0127 20:03:18.994303 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:18Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.010584 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.021249 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.040242 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.057841 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.076272 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting
\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.091161 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc4399f4d91a9ce597c713fd26c9d1205279ecf5650f9955f9986fb6131faae4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:09Z\\\",\\\"message\\\":\\\"W0127 20:02:59.197818 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0127 
20:02:59.198205 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769544179 cert, and key in /tmp/serving-cert-819727762/serving-signer.crt, /tmp/serving-cert-819727762/serving-signer.key\\\\nI0127 20:02:59.622901 1 observer_polling.go:159] Starting file observer\\\\nW0127 20:02:59.627211 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0127 20:02:59.627348 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:02:59.629352 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-819727762/tls.crt::/tmp/serving-cert-819727762/tls.key\\\\\\\"\\\\nF0127 20:03:09.931853 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 
20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.105369 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.196988 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.215705 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.244730 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.256635 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.275625 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.291522 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.310318 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"l
astState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f
6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.368172 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54
b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\
\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.389858 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.449305 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-tl72n"] Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.449725 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-tl72n" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.460835 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.494430 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.494740 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.460959 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.494640 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.497381 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8knb\" (UniqueName: \"kubernetes.io/projected/bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6-kube-api-access-j8knb\") pod \"node-ca-tl72n\" (UID: \"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\") " pod="openshift-image-registry/node-ca-tl72n" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.497478 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6-host\") pod \"node-ca-tl72n\" (UID: \"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\") " pod="openshift-image-registry/node-ca-tl72n" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.497660 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6-serviceca\") pod \"node-ca-tl72n\" (UID: \"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\") " pod="openshift-image-registry/node-ca-tl72n" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.516966 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.529250 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1
220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.549305 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.574145 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.598872 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6-serviceca\") pod \"node-ca-tl72n\" (UID: \"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\") " pod="openshift-image-registry/node-ca-tl72n" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.598993 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8knb\" (UniqueName: \"kubernetes.io/projected/bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6-kube-api-access-j8knb\") pod \"node-ca-tl72n\" (UID: \"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\") " pod="openshift-image-registry/node-ca-tl72n" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.599038 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6-host\") pod \"node-ca-tl72n\" (UID: \"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\") " pod="openshift-image-registry/node-ca-tl72n" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.599121 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6-host\") pod \"node-ca-tl72n\" (UID: \"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\") " pod="openshift-image-registry/node-ca-tl72n" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.600342 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6-serviceca\") pod \"node-ca-tl72n\" (UID: \"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\") " pod="openshift-image-registry/node-ca-tl72n" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.613292 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.637795 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8knb\" (UniqueName: \"kubernetes.io/projected/bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6-kube-api-access-j8knb\") pod \"node-ca-tl72n\" (UID: \"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\") " pod="openshift-image-registry/node-ca-tl72n" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.629574 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.664458 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571a
a215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.688086 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.703646 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.723098 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.743739 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.761020 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.784321 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.787212 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 18:52:19.98356455 +0000 UTC Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.797063 4793 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.809267 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-tl72n" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.811316 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\
"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.827274 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.846081 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.860284 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.894865 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z 
is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.924607 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:19Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.986351 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-tl72n" event={"ID":"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6","Type":"ContainerStarted","Data":"f9b89a6a22b5307df8881f4b31747372de23bae5fbbbc57b9a2054da5cdd9150"} Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.988805 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36"} Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.995757 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerStarted","Data":"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe"} Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.995821 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerStarted","Data":"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e"} Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.995838 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerStarted","Data":"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82"} Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.995850 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerStarted","Data":"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5"} Jan 27 20:03:19 crc kubenswrapper[4793]: I0127 20:03:19.995864 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerStarted","Data":"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec"} Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.003954 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:20Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.024685 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:20Z 
is after 2025-08-24T17:21:41Z" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.042809 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:20Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.059773 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee
1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"i
mageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:20Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.078139 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:20Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.098943 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:20Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.120478 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:20Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.189450 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:20Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.265343 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:20Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.388528 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"l
astState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f
6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:20Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.420563 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\
\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:20Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.432786 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-27T20:03:20Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.445507 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:20Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.468899 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:20Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.485805 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:20Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.536601 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.536757 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:20 crc kubenswrapper[4793]: E0127 20:03:20.536781 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:03:24.536750427 +0000 UTC m=+29.927003663 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.536828 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.536900 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:20 crc kubenswrapper[4793]: E0127 20:03:20.536921 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 20:03:20 crc kubenswrapper[4793]: E0127 20:03:20.536945 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 20:03:20 crc kubenswrapper[4793]: E0127 20:03:20.536957 4793 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:20 crc kubenswrapper[4793]: E0127 20:03:20.536973 4793 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 20:03:20 crc kubenswrapper[4793]: E0127 20:03:20.536987 4793 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 20:03:20 crc kubenswrapper[4793]: E0127 20:03:20.537007 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.536926 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:20 crc kubenswrapper[4793]: E0127 20:03:20.537028 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object 
"openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 20:03:20 crc kubenswrapper[4793]: E0127 20:03:20.537038 4793 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:20 crc kubenswrapper[4793]: E0127 20:03:20.537009 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:24.536992203 +0000 UTC m=+29.927245359 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:20 crc kubenswrapper[4793]: E0127 20:03:20.537098 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:24.537082995 +0000 UTC m=+29.927336231 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 20:03:20 crc kubenswrapper[4793]: E0127 20:03:20.537118 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:24.537108925 +0000 UTC m=+29.927362171 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 20:03:20 crc kubenswrapper[4793]: E0127 20:03:20.537143 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:24.537136246 +0000 UTC m=+29.927389532 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.787861 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 09:02:06.589554411 +0000 UTC Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.803261 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.803278 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:20 crc kubenswrapper[4793]: E0127 20:03:20.803410 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:20 crc kubenswrapper[4793]: I0127 20:03:20.803297 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:20 crc kubenswrapper[4793]: E0127 20:03:20.803502 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:20 crc kubenswrapper[4793]: E0127 20:03:20.803695 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.000695 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerStarted","Data":"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557"} Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.001861 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-tl72n" event={"ID":"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6","Type":"ContainerStarted","Data":"bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1"} Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.002971 4793 generic.go:334] "Generic (PLEG): container finished" podID="fe475131-3b65-45aa-a877-190a8bdec86f" containerID="139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54" exitCode=0 Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.003034 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" event={"ID":"fe475131-3b65-45aa-a877-190a8bdec86f","Type":"ContainerDied","Data":"139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54"} Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.038022 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.093209 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.104735 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.116376 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.131696 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"l
astState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f
6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.154282 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54
b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\
\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.168616 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.180752 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.190572 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.201673 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.214158 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1
220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.236821 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.249990 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.271816 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z 
is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.284558 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.296798 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"19
2.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.311751 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc 
kubenswrapper[4793]: I0127 20:03:21.331686 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\
\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Co
mpleted\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.347786 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.371900 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.391018 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.404821 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.421455 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.446839 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.460576 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.475879 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.494825 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.508178 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.525830 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.539261 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k
8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:21Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:21 crc kubenswrapper[4793]: I0127 20:03:21.794517 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 08:00:50.319309057 +0000 UTC Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.011104 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" event={"ID":"fe475131-3b65-45aa-a877-190a8bdec86f","Type":"ContainerStarted","Data":"c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf"} Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.034589 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.046328 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.064964 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\
\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn
-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\
\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.078571 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.095616 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.110459 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.129217 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\
\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":f
alse,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.142059 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.156598 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.171968 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.209822 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.221622 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.234055 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.245812 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.256729 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.290726 4793 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.292710 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.292766 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.292779 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.292895 4793 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.300070 4793 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.300354 4793 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.301612 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.301667 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.301680 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.301696 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.301711 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:22Z","lastTransitionTime":"2026-01-27T20:03:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:22 crc kubenswrapper[4793]: E0127 20:03:22.321278 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.325066 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.325115 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.325127 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.325144 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.325156 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:22Z","lastTransitionTime":"2026-01-27T20:03:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:22 crc kubenswrapper[4793]: E0127 20:03:22.340903 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.345201 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.345245 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.345256 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.345275 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.345287 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:22Z","lastTransitionTime":"2026-01-27T20:03:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:22 crc kubenswrapper[4793]: E0127 20:03:22.358747 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.362316 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.362374 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.362395 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.362420 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.362434 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:22Z","lastTransitionTime":"2026-01-27T20:03:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:22 crc kubenswrapper[4793]: E0127 20:03:22.376675 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.381132 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.381183 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.381194 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.381213 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.381224 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:22Z","lastTransitionTime":"2026-01-27T20:03:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:22 crc kubenswrapper[4793]: E0127 20:03:22.392865 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:22Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:22 crc kubenswrapper[4793]: E0127 20:03:22.393012 4793 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.394561 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.394597 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.394605 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.394619 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.394628 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:22Z","lastTransitionTime":"2026-01-27T20:03:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.497060 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.497115 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.497131 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.497153 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.497175 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:22Z","lastTransitionTime":"2026-01-27T20:03:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.599150 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.599187 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.599198 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.599214 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.599225 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:22Z","lastTransitionTime":"2026-01-27T20:03:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.702111 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.702247 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.702271 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.702296 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.702357 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:22Z","lastTransitionTime":"2026-01-27T20:03:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.795457 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 11:52:44.00388674 +0000 UTC
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.802728 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.802781 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.802808 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 20:03:22 crc kubenswrapper[4793]: E0127 20:03:22.802893 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 27 20:03:22 crc kubenswrapper[4793]: E0127 20:03:22.802986 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 27 20:03:22 crc kubenswrapper[4793]: E0127 20:03:22.803735 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.804605 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.804640 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.804653 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.804669 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.804682 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:22Z","lastTransitionTime":"2026-01-27T20:03:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.907431 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.907467 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.907478 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.907496 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:22 crc kubenswrapper[4793]: I0127 20:03:22.907508 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:22Z","lastTransitionTime":"2026-01-27T20:03:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.009491 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.009522 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.009530 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.009557 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.009565 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:23Z","lastTransitionTime":"2026-01-27T20:03:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.017406 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerStarted","Data":"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0"}
Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.019239 4793 generic.go:334] "Generic (PLEG): container finished" podID="fe475131-3b65-45aa-a877-190a8bdec86f" containerID="c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf" exitCode=0
Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.019269 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" event={"ID":"fe475131-3b65-45aa-a877-190a8bdec86f","Type":"ContainerDied","Data":"c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf"}
Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.055961 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571a
a215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:23Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.076083 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:23Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.090822 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:23Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.104243 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:23Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.112251 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.112327 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.112341 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.112358 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.112371 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:23Z","lastTransitionTime":"2026-01-27T20:03:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.120214 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:23Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.134251 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:23Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.150502 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"c
ontainerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:23Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.168602 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:23Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.179319 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:23Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.192087 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:23Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.205248 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:23Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.215147 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.215207 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.215218 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.215239 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.215250 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:23Z","lastTransitionTime":"2026-01-27T20:03:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.219111 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:23Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.233090 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:23Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.253566 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:23Z 
is after 2025-08-24T17:21:41Z" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.267624 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:23Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.317524 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.317696 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.317706 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.317718 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.317726 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:23Z","lastTransitionTime":"2026-01-27T20:03:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.420229 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.420282 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.420294 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.420313 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.420325 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:23Z","lastTransitionTime":"2026-01-27T20:03:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.522764 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.522796 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.522806 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.522819 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.522828 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:23Z","lastTransitionTime":"2026-01-27T20:03:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.625000 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.625064 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.625074 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.625094 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.625105 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:23Z","lastTransitionTime":"2026-01-27T20:03:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.728104 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.728164 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.728176 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.728197 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.728213 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:23Z","lastTransitionTime":"2026-01-27T20:03:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.830752 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.830792 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.830802 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.830821 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.830833 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:23Z","lastTransitionTime":"2026-01-27T20:03:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.933923 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.933983 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.934002 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.934028 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:23 crc kubenswrapper[4793]: I0127 20:03:23.934047 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:23Z","lastTransitionTime":"2026-01-27T20:03:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.036853 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.036900 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.036913 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.036931 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.036947 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:24Z","lastTransitionTime":"2026-01-27T20:03:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.139476 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.139508 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.139519 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.139536 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.139564 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:24Z","lastTransitionTime":"2026-01-27T20:03:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.178569 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 03:31:33.678243072 +0000 UTC Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.178727 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.178837 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.184178 4793 generic.go:334] "Generic (PLEG): container finished" podID="fe475131-3b65-45aa-a877-190a8bdec86f" containerID="923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90" exitCode=0 Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.184228 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" event={"ID":"fe475131-3b65-45aa-a877-190a8bdec86f","Type":"ContainerDied","Data":"923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90"} Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.197035 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.197800 4793 scope.go:117] "RemoveContainer" containerID="c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da" Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.197979 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.205751 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\
"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db6
61b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.222628 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.238819 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.241624 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.241675 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.241689 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.241707 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.241722 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:24Z","lastTransitionTime":"2026-01-27T20:03:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.249249 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.263541 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.273149 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.287345 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\
\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.300816 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.319937 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.332971 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.343621 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.343660 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.343672 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.343691 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.343705 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:24Z","lastTransitionTime":"2026-01-27T20:03:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.344081 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.362793 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.379418 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.406118 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"n
ame\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.418742 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.446708 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.446752 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.446762 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.446778 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.446789 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:24Z","lastTransitionTime":"2026-01-27T20:03:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.548818 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.548860 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.548872 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.548934 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.548948 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:24Z","lastTransitionTime":"2026-01-27T20:03:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.582924 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.583137 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.583189 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.583247 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.583275 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.583426 4793 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.583511 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:32.58349247 +0000 UTC m=+37.973745626 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.583637 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-27 20:03:32.583622883 +0000 UTC m=+37.973876039 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.583747 4793 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.583766 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.583792 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.583806 4793 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.583820 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:32.583803987 +0000 UTC m=+37.974057143 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.583845 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:32.583832988 +0000 UTC m=+37.974086234 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.583898 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.583910 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.583918 4793 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.583939 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:32.58393349 +0000 UTC m=+37.974186646 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.651524 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.651583 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.651593 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.651606 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.651615 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:24Z","lastTransitionTime":"2026-01-27T20:03:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.758803 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.758852 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.758864 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.758876 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.758886 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:24Z","lastTransitionTime":"2026-01-27T20:03:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.802384 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.802386 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.802857 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:03:24 crc kubenswrapper[4793]: E0127 20:03:24.802717 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.862385 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.862808 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.862824 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.862843 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.862857 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:24Z","lastTransitionTime":"2026-01-27T20:03:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.966040 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.966083 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.966094 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.966109 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:24 crc kubenswrapper[4793]: I0127 20:03:24.966143 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:24Z","lastTransitionTime":"2026-01-27T20:03:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.068685 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.068724 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.068735 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.068750 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.068758 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:25Z","lastTransitionTime":"2026-01-27T20:03:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.171018 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.171053 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.171061 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.171074 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.171084 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:25Z","lastTransitionTime":"2026-01-27T20:03:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.178680 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 17:49:14.099556176 +0000 UTC Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.191924 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerStarted","Data":"8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20"} Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.192240 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.195778 4793 generic.go:334] "Generic (PLEG): container finished" podID="fe475131-3b65-45aa-a877-190a8bdec86f" containerID="d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3" exitCode=0 Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.195816 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" event={"ID":"fe475131-3b65-45aa-a877-190a8bdec86f","Type":"ContainerDied","Data":"d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3"} Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.211952 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.225766 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.233021 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp
-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.247730 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.287172 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.287234 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.287245 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.287284 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.287295 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:25Z","lastTransitionTime":"2026-01-27T20:03:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.306522 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.332336 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.346958 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.369994 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.381713 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.388958 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.389004 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.389016 4793 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.389034 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.389047 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:25Z","lastTransitionTime":"2026-01-27T20:03:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.392351 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.405304 4793 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a037872
58318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.417495 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.429351 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.442112 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.454135 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.472012 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"k
ube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\
":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.483414 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.497130 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.497169 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.497183 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.497200 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.497212 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:25Z","lastTransitionTime":"2026-01-27T20:03:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.502728 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"cont
ainerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkub
e-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.516785 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.613984 4793 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.620079 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.620149 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.620167 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.620196 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.620217 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:25Z","lastTransitionTime":"2026-01-27T20:03:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.722144 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.722177 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.722185 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.722197 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.722206 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:25Z","lastTransitionTime":"2026-01-27T20:03:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.802602 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:25 crc kubenswrapper[4793]: E0127 20:03:25.802724 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.825033 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.825078 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.825090 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.825105 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.825117 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:25Z","lastTransitionTime":"2026-01-27T20:03:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.928192 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.928232 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.928241 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.928256 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:25 crc kubenswrapper[4793]: I0127 20:03:25.928267 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:25Z","lastTransitionTime":"2026-01-27T20:03:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.030864 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.031245 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.031262 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.031278 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.031290 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:26Z","lastTransitionTime":"2026-01-27T20:03:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.133079 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.133110 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.133119 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.133133 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.133145 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:26Z","lastTransitionTime":"2026-01-27T20:03:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.179766 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 14:48:02.762880066 +0000 UTC Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.200875 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" event={"ID":"fe475131-3b65-45aa-a877-190a8bdec86f","Type":"ContainerStarted","Data":"82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb"} Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.200923 4793 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.201526 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.234452 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.235256 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.235276 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.235284 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.235295 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.235304 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:26Z","lastTransitionTime":"2026-01-27T20:03:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.338331 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.338392 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.338419 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.338443 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.338461 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:26Z","lastTransitionTime":"2026-01-27T20:03:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.440921 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.440968 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.440977 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.440992 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.441003 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:26Z","lastTransitionTime":"2026-01-27T20:03:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.544681 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.544721 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.544729 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.544744 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.544755 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:26Z","lastTransitionTime":"2026-01-27T20:03:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.639658 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.647957 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.647996 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.648010 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.648026 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.648040 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:26Z","lastTransitionTime":"2026-01-27T20:03:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.656987 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.675701 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.686016 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.700716 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.712441 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.730246 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0
ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.747178 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.751383 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.751439 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.751451 4793 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.751468 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.751480 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:26Z","lastTransitionTime":"2026-01-27T20:03:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.759421 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.772249 4793 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a037872
58318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.785379 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.800924 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.803116 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:26 crc kubenswrapper[4793]: E0127 20:03:26.803247 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.803318 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:26 crc kubenswrapper[4793]: E0127 20:03:26.803526 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.815561 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"r
eady\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.827399 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.841728 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026
-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.854303 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.854599 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.854716 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.854827 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.854932 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:26Z","lastTransitionTime":"2026-01-27T20:03:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.856270 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.870247 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.890333 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f
89d3b78e8ee26d2f0c930f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.903200 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.920375 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.932907 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.945743 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.955885 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.957357 4793 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.957388 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.957400 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.957414 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.957424 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:26Z","lastTransitionTime":"2026-01-27T20:03:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.968104 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:26 crc kubenswrapper[4793]: I0127 20:03:26.986145 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c
3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",
\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.005560 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.019246 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.030707 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.047091 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.059092 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.060352 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.060396 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.060409 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.060425 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.060436 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:27Z","lastTransitionTime":"2026-01-27T20:03:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.074036 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.090594 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.104969 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.116009 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.128313 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.149089 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c
3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",
\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.162335 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.162727    4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.162765    4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.162775    4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.162789    4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.162799    4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:27Z","lastTransitionTime":"2026-01-27T20:03:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.175554    4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.180762    4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 10:07:40.876712913 +0000 UTC
Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.184976    4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.200985 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.208447    4793 generic.go:334] "Generic (PLEG): container finished" podID="fe475131-3b65-45aa-a877-190a8bdec86f" containerID="82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb" exitCode=0
Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.208510    4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" event={"ID":"fe475131-3b65-45aa-a877-190a8bdec86f","Type":"ContainerDied","Data":"82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb"}
Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.208654    4793 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.216046    4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.231104 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.248328 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.261649 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.265780 4793 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.265816 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.265827 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.265861 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.265870 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:27Z","lastTransitionTime":"2026-01-27T20:03:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.276437 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.296704 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571a
a215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.312195 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.325289 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.336419 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.351953 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.362745 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.368908 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.368967 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.368984 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.369024 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.369036 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:27Z","lastTransitionTime":"2026-01-27T20:03:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.373852 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.389066 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.403487 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.418471 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.431864 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.457521 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174
f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvsw
itch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:27Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.471292 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.471333 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.471341 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.471355 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.471364 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:27Z","lastTransitionTime":"2026-01-27T20:03:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.573595 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.573636 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.573645 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.573659 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.573668 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:27Z","lastTransitionTime":"2026-01-27T20:03:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.675595 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.675631 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.675642 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.675685 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.675696 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:27Z","lastTransitionTime":"2026-01-27T20:03:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.778241 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.778277 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.778288 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.778304 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.778315 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:27Z","lastTransitionTime":"2026-01-27T20:03:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.803239 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:27 crc kubenswrapper[4793]: E0127 20:03:27.803381 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.880582 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.880609 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.880618 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.880630 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.880638 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:27Z","lastTransitionTime":"2026-01-27T20:03:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.982836 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.982876 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.982888 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.982903 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:27 crc kubenswrapper[4793]: I0127 20:03:27.982912 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:27Z","lastTransitionTime":"2026-01-27T20:03:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.085152 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.085198 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.085207 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.085225 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.085235 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:28Z","lastTransitionTime":"2026-01-27T20:03:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.181292 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 21:17:11.939548145 +0000 UTC Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.186739 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.186783 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.186797 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.186812 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.186823 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:28Z","lastTransitionTime":"2026-01-27T20:03:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.215209 4793 generic.go:334] "Generic (PLEG): container finished" podID="fe475131-3b65-45aa-a877-190a8bdec86f" containerID="e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b" exitCode=0 Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.215318 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" event={"ID":"fe475131-3b65-45aa-a877-190a8bdec86f","Type":"ContainerDied","Data":"e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b"} Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.215384 4793 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.241139 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:28Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.253017 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:28Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.265709 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:28Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.282350 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:28Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.289535 4793 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.289609 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.289625 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.289645 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.289657 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:28Z","lastTransitionTime":"2026-01-27T20:03:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.297423 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:28Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.315117 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571a
a215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:28Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.327741 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:28Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.340782 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:28Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.354313 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:28Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.368447 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:28Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.383362 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1
220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:28Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.392456 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.392488 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.392497 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.392512 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.392523 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:28Z","lastTransitionTime":"2026-01-27T20:03:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.394664 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:28Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.413866 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:28Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.432410 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:28Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.445063 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:28Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.494859 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.494894 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.494903 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.494947 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.494956 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:28Z","lastTransitionTime":"2026-01-27T20:03:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.597205 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.597265 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.597284 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.597309 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.597327 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:28Z","lastTransitionTime":"2026-01-27T20:03:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.699213 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.699450 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.699540 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.699659 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.699780 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:28Z","lastTransitionTime":"2026-01-27T20:03:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.802204 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.802274 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:28 crc kubenswrapper[4793]: E0127 20:03:28.802299 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:03:28 crc kubenswrapper[4793]: E0127 20:03:28.802455 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.802753 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.802788 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.802797 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.802813 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.802829 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:28Z","lastTransitionTime":"2026-01-27T20:03:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.905811 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.905849 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.905860 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.905876 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:28 crc kubenswrapper[4793]: I0127 20:03:28.905885 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:28Z","lastTransitionTime":"2026-01-27T20:03:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.008399 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.008440 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.008450 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.008462 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.008471 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:29Z","lastTransitionTime":"2026-01-27T20:03:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.110337 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.110376 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.110388 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.110404 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.110414 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:29Z","lastTransitionTime":"2026-01-27T20:03:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.182207 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 21:01:34.261621606 +0000 UTC Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.213364 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.213404 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.213415 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.213428 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.213440 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:29Z","lastTransitionTime":"2026-01-27T20:03:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.219168 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovnkube-controller/0.log" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.220994 4793 generic.go:334] "Generic (PLEG): container finished" podID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerID="8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20" exitCode=1 Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.221054 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerDied","Data":"8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20"} Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.221619 4793 scope.go:117] "RemoveContainer" containerID="8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.225070 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" event={"ID":"fe475131-3b65-45aa-a877-190a8bdec86f","Type":"ContainerStarted","Data":"4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c"} Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.236734 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.252275 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.264437 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.281101 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.293528 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.307237 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.315712 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.315757 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.315768 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.315784 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.315797 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:29Z","lastTransitionTime":"2026-01-27T20:03:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.327018 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f
89d3b78e8ee26d2f0c930f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"message\\\":\\\"7 20:03:28.843374 6020 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:28.843400 6020 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0127 20:03:28.843407 6020 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0127 20:03:28.843430 6020 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:28.843440 6020 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:28.843445 6020 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:28.843453 6020 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:28.843458 6020 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:28.843459 6020 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0127 20:03:28.843487 6020 factory.go:656] Stopping watch factory\\\\nI0127 20:03:28.843490 6020 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:28.843500 6020 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:28.843503 6020 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:28.843510 6020 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.348977 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/c
ni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.360896 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.379344 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a168
8df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"
/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.397753 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571a
a215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.409396 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.418854 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.418920 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.418934 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.418951 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.418963 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:29Z","lastTransitionTime":"2026-01-27T20:03:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.422468 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.433218 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.448279 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.461541 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.475313 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.486483 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.505049 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f
89d3b78e8ee26d2f0c930f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"message\\\":\\\"7 20:03:28.843374 6020 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:28.843400 6020 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0127 20:03:28.843407 6020 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0127 20:03:28.843430 6020 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:28.843440 6020 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:28.843445 6020 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:28.843453 6020 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:28.843458 6020 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:28.843459 6020 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0127 20:03:28.843487 6020 factory.go:656] Stopping watch factory\\\\nI0127 20:03:28.843490 6020 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:28.843500 6020 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:28.843503 6020 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:28.843510 6020 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.518410 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/c
ni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.521019 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.521061 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.521071 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.521087 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.521097 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:29Z","lastTransitionTime":"2026-01-27T20:03:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.531648 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.543906 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.557425 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.570608 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.585745 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.610379 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571a
a215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.624098 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.624586 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.624637 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.624665 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.624900 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:29Z","lastTransitionTime":"2026-01-27T20:03:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.626394 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.649529 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.665512 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.678641 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:29Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.861123 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:29 crc kubenswrapper[4793]: E0127 20:03:29.861299 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.862521 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.862589 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.862608 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.862662 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.862678 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:29Z","lastTransitionTime":"2026-01-27T20:03:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.965381 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.965441 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.965459 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.965482 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:29 crc kubenswrapper[4793]: I0127 20:03:29.965498 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:29Z","lastTransitionTime":"2026-01-27T20:03:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.067800 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.067839 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.067849 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.067863 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.067873 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:30Z","lastTransitionTime":"2026-01-27T20:03:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.170069 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.170109 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.170119 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.170133 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.170142 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:30Z","lastTransitionTime":"2026-01-27T20:03:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.183274 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 01:25:27.81913149 +0000 UTC Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.230855 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovnkube-controller/0.log" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.233133 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerStarted","Data":"7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684"} Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.233263 4793 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.245791 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.266670 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"message\\\":\\\"7 20:03:28.843374 6020 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:28.843400 6020 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0127 20:03:28.843407 6020 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0127 20:03:28.843430 6020 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:28.843440 6020 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:28.843445 6020 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:28.843453 6020 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:28.843458 6020 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:28.843459 6020 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0127 20:03:28.843487 6020 factory.go:656] Stopping watch factory\\\\nI0127 20:03:28.843490 6020 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:28.843500 6020 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:28.843503 6020 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:28.843510 6020 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.274174 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.274229 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.274245 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.274266 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.274281 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:30Z","lastTransitionTime":"2026-01-27T20:03:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.283805 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.297934 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d
8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.309933 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.318746 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.329895 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.339995 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.356797 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.374790 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571a
a215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.376472 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.376511 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.376522 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.376540 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.376577 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:30Z","lastTransitionTime":"2026-01-27T20:03:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.386494 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.399513 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.413461 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.426863 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.444702 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1
220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.454327 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp"] Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.454934 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" Jan 27 20:03:30 crc kubenswrapper[4793]: W0127 20:03:30.456705 4793 reflector.go:561] object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert": failed to list *v1.Secret: secrets "ovn-control-plane-metrics-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-ovn-kubernetes": no relationship found between node 'crc' and this object Jan 27 20:03:30 crc kubenswrapper[4793]: E0127 20:03:30.456753 4793 reflector.go:158] "Unhandled Error" err="object-\"openshift-ovn-kubernetes\"/\"ovn-control-plane-metrics-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"ovn-control-plane-metrics-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-ovn-kubernetes\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 27 20:03:30 crc kubenswrapper[4793]: W0127 20:03:30.456847 4793 reflector.go:561] object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd": failed to list *v1.Secret: secrets "ovn-kubernetes-control-plane-dockercfg-gs7dd" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-ovn-kubernetes": no relationship found between node 'crc' and this object Jan 27 20:03:30 crc kubenswrapper[4793]: E0127 20:03:30.456868 4793 reflector.go:158] "Unhandled Error" err="object-\"openshift-ovn-kubernetes\"/\"ovn-kubernetes-control-plane-dockercfg-gs7dd\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"ovn-kubernetes-control-plane-dockercfg-gs7dd\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-ovn-kubernetes\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.472810 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.499857 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.499899 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.499912 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.499929 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.499939 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:30Z","lastTransitionTime":"2026-01-27T20:03:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.513662 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.532493 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.551273 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.568278 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/25666aca-21d3-4cae-8386-90aaaebd1a52-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-52dvp\" (UID: \"25666aca-21d3-4cae-8386-90aaaebd1a52\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.568338 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2d2b\" (UniqueName: \"kubernetes.io/projected/25666aca-21d3-4cae-8386-90aaaebd1a52-kube-api-access-b2d2b\") pod \"ovnkube-control-plane-749d76644c-52dvp\" (UID: \"25666aca-21d3-4cae-8386-90aaaebd1a52\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.568362 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/25666aca-21d3-4cae-8386-90aaaebd1a52-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-52dvp\" (UID: 
\"25666aca-21d3-4cae-8386-90aaaebd1a52\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.568412 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/25666aca-21d3-4cae-8386-90aaaebd1a52-env-overrides\") pod \"ovnkube-control-plane-749d76644c-52dvp\" (UID: \"25666aca-21d3-4cae-8386-90aaaebd1a52\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.570859 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.600731 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"message\\\":\\\"7 20:03:28.843374 6020 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:28.843400 6020 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0127 20:03:28.843407 6020 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0127 20:03:28.843430 6020 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:28.843440 6020 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:28.843445 6020 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:28.843453 6020 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:28.843458 6020 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:28.843459 6020 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0127 20:03:28.843487 6020 factory.go:656] Stopping watch factory\\\\nI0127 20:03:28.843490 6020 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:28.843500 6020 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:28.843503 6020 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:28.843510 6020 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.601655 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.601682 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.601690 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.601703 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.601712 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:30Z","lastTransitionTime":"2026-01-27T20:03:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.613816 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.626484 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.644451 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc
2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\
\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.659927 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.669046 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/25666aca-21d3-4cae-8386-90aaaebd1a52-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-52dvp\" (UID: \"25666aca-21d3-4cae-8386-90aaaebd1a52\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.669093 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2d2b\" (UniqueName: \"kubernetes.io/projected/25666aca-21d3-4cae-8386-90aaaebd1a52-kube-api-access-b2d2b\") pod \"ovnkube-control-plane-749d76644c-52dvp\" (UID: \"25666aca-21d3-4cae-8386-90aaaebd1a52\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.669123 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/25666aca-21d3-4cae-8386-90aaaebd1a52-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-52dvp\" (UID: \"25666aca-21d3-4cae-8386-90aaaebd1a52\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.669164 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/25666aca-21d3-4cae-8386-90aaaebd1a52-env-overrides\") pod \"ovnkube-control-plane-749d76644c-52dvp\" (UID: \"25666aca-21d3-4cae-8386-90aaaebd1a52\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.669768 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/25666aca-21d3-4cae-8386-90aaaebd1a52-env-overrides\") pod \"ovnkube-control-plane-749d76644c-52dvp\" (UID: \"25666aca-21d3-4cae-8386-90aaaebd1a52\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.670017 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/25666aca-21d3-4cae-8386-90aaaebd1a52-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-52dvp\" (UID: \"25666aca-21d3-4cae-8386-90aaaebd1a52\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.670667 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.681628 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.691115 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2d2b\" (UniqueName: \"kubernetes.io/projected/25666aca-21d3-4cae-8386-90aaaebd1a52-kube-api-access-b2d2b\") pod \"ovnkube-control-plane-749d76644c-52dvp\" (UID: \"25666aca-21d3-4cae-8386-90aaaebd1a52\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.696041 4793 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.703925 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.704110 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.704207 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.704320 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.704404 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:30Z","lastTransitionTime":"2026-01-27T20:03:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns 
error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.707429 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.717458 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: 
I0127 20:03:30.730064 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.802472 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:30 crc kubenswrapper[4793]: E0127 20:03:30.802665 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.802796 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:03:30 crc kubenswrapper[4793]: E0127 20:03:30.802985 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.807140 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.807176 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.807186 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.807204 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.807217 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:30Z","lastTransitionTime":"2026-01-27T20:03:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.909952 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.910213 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.910278 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.910341 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:30 crc kubenswrapper[4793]: I0127 20:03:30.910428 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:30Z","lastTransitionTime":"2026-01-27T20:03:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.012807 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.012839 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.012848 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.012862 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.012872 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:31Z","lastTransitionTime":"2026-01-27T20:03:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.115084 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.115112 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.115122 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.115134 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.115143 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:31Z","lastTransitionTime":"2026-01-27T20:03:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.183842 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 01:37:36.373449012 +0000 UTC
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.217563 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.217606 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.217614 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.217627 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.217636 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:31Z","lastTransitionTime":"2026-01-27T20:03:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.237794 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovnkube-controller/1.log"
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.238484 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovnkube-controller/0.log"
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.241130 4793 generic.go:334] "Generic (PLEG): container finished" podID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerID="7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684" exitCode=1
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.241175 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerDied","Data":"7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684"}
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.241217 4793 scope.go:117] "RemoveContainer" containerID="8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20"
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.241862 4793 scope.go:117] "RemoveContainer" containerID="7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684"
Jan 27 20:03:31 crc kubenswrapper[4793]: E0127 20:03:31.242043 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-8glmz_openshift-ovn-kubernetes(4fb300f8-bf40-4c4e-a3e5-4d5149177aae)\"" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae"
Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.255012 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.268377 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.279905 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.291053 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.301616 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.319901 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.319949 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.319962 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.319979 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.319990 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:31Z","lastTransitionTime":"2026-01-27T20:03:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.320437 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"message\\\":\\\"7 20:03:28.843374 6020 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:28.843400 6020 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0127 20:03:28.843407 6020 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0127 20:03:28.843430 6020 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:28.843440 6020 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:28.843445 6020 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:28.843453 6020 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:28.843458 6020 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:28.843459 6020 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0127 20:03:28.843487 6020 factory.go:656] Stopping watch factory\\\\nI0127 20:03:28.843490 6020 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:28.843500 6020 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:28.843503 6020 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:28.843510 6020 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\":656] Stopping watch factory\\\\nI0127 20:03:30.292708 6227 ovnkube.go:599] Stopped ovnkube\\\\nI0127 20:03:30.292737 6227 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:30.292789 6227 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0127 20:03:30.292939 6227 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z]\\\\nI0127 20:03:30.292954 6227 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress/router-internal-default\\\\\\\"}\\\\nI0127 20:03:30.292968 6227 services_controller.go:360] Finished syncing service router-internal-default on namespace openshift-ingress for network=default : 
2.452521ms\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174
f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.331836 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\"
:\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.349850 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571a
a215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.352017 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.363541 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/25666aca-21d3-4cae-8386-90aaaebd1a52-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-52dvp\" (UID: \"25666aca-21d3-4cae-8386-90aaaebd1a52\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.373158 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.380150 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.388300 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.389506 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: W0127 20:03:31.402031 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod25666aca_21d3_4cae_8386_90aaaebd1a52.slice/crio-6181b403cdd9c7ed70e4ca229d959a2c19de33d21368b093dd5c69752fb85f59 WatchSource:0}: Error finding container 6181b403cdd9c7ed70e4ca229d959a2c19de33d21368b093dd5c69752fb85f59: Status 404 returned error can't find the container with id 6181b403cdd9c7ed70e4ca229d959a2c19de33d21368b093dd5c69752fb85f59 Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.407348 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.421352 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.424505 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.424579 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.424593 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.424614 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.424624 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:31Z","lastTransitionTime":"2026-01-27T20:03:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.436074 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.453928 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 
2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.468975 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.480333 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.527511 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.527706 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.527728 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.527750 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.527768 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:31Z","lastTransitionTime":"2026-01-27T20:03:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.561845 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-gsrf9"] Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.562608 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:31 crc kubenswrapper[4793]: E0127 20:03:31.562720 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.573454 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.578205 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lkzm\" (UniqueName: \"kubernetes.io/projected/93412db5-52e2-4b3a-aee4-3c43f090750e-kube-api-access-2lkzm\") pod \"network-metrics-daemon-gsrf9\" (UID: \"93412db5-52e2-4b3a-aee4-3c43f090750e\") " pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.578271 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs\") pod \"network-metrics-daemon-gsrf9\" (UID: \"93412db5-52e2-4b3a-aee4-3c43f090750e\") " pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.588400 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.609953 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571a
a215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.624623 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.629508 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.629558 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.629571 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.629589 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.629600 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:31Z","lastTransitionTime":"2026-01-27T20:03:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.640469 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.652990 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.666477 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.679444 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs\") pod \"network-metrics-daemon-gsrf9\" (UID: \"93412db5-52e2-4b3a-aee4-3c43f090750e\") " pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.679503 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lkzm\" (UniqueName: \"kubernetes.io/projected/93412db5-52e2-4b3a-aee4-3c43f090750e-kube-api-access-2lkzm\") pod \"network-metrics-daemon-gsrf9\" (UID: \"93412db5-52e2-4b3a-aee4-3c43f090750e\") " pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:31 crc kubenswrapper[4793]: E0127 20:03:31.679875 4793 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 20:03:31 crc kubenswrapper[4793]: E0127 20:03:31.679974 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs podName:93412db5-52e2-4b3a-aee4-3c43f090750e nodeName:}" failed. No retries permitted until 2026-01-27 20:03:32.179951868 +0000 UTC m=+37.570205024 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs") pod "network-metrics-daemon-gsrf9" (UID: "93412db5-52e2-4b3a-aee4-3c43f090750e") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.681617 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.693446 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.705520 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lkzm\" (UniqueName: \"kubernetes.io/projected/93412db5-52e2-4b3a-aee4-3c43f090750e-kube-api-access-2lkzm\") pod \"network-metrics-daemon-gsrf9\" (UID: \"93412db5-52e2-4b3a-aee4-3c43f090750e\") " pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.708535 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.722647 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.732204 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.732236 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.732246 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.732265 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.732279 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:31Z","lastTransitionTime":"2026-01-27T20:03:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.737293 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.751430 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.765268 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.778100 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.794951 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebc
e959a1d832f998f854b8d684\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"message\\\":\\\"7 20:03:28.843374 6020 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:28.843400 6020 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0127 20:03:28.843407 6020 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0127 20:03:28.843430 6020 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:28.843440 6020 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:28.843445 6020 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:28.843453 6020 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:28.843458 6020 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:28.843459 6020 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0127 20:03:28.843487 6020 factory.go:656] Stopping watch factory\\\\nI0127 20:03:28.843490 6020 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:28.843500 6020 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:28.843503 6020 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:28.843510 6020 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\":656] Stopping watch factory\\\\nI0127 20:03:30.292708 6227 ovnkube.go:599] Stopped ovnkube\\\\nI0127 20:03:30.292737 6227 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:30.292789 6227 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0127 20:03:30.292939 6227 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z]\\\\nI0127 20:03:30.292954 6227 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress/router-internal-default\\\\\\\"}\\\\nI0127 20:03:30.292968 6227 services_controller.go:360] Finished syncing service router-internal-default on namespace openshift-ingress for network=default : 2.452521ms\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"host
IPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.803391 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:31 crc kubenswrapper[4793]: E0127 20:03:31.803514 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.811914 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\
\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:31Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.835383 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.835432 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.835442 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.835463 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.835474 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:31Z","lastTransitionTime":"2026-01-27T20:03:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.937867 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.937898 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.937909 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.937922 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:31 crc kubenswrapper[4793]: I0127 20:03:31.937932 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:31Z","lastTransitionTime":"2026-01-27T20:03:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.039967 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.039999 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.040010 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.040024 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.040033 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:32Z","lastTransitionTime":"2026-01-27T20:03:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.141878 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.141910 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.141918 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.141931 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.141939 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:32Z","lastTransitionTime":"2026-01-27T20:03:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.184348 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs\") pod \"network-metrics-daemon-gsrf9\" (UID: \"93412db5-52e2-4b3a-aee4-3c43f090750e\") " pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.184382 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 00:43:47.205950387 +0000 UTC Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.184496 4793 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.184617 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs podName:93412db5-52e2-4b3a-aee4-3c43f090750e nodeName:}" failed. 
No retries permitted until 2026-01-27 20:03:33.184594578 +0000 UTC m=+38.574847804 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs") pod "network-metrics-daemon-gsrf9" (UID: "93412db5-52e2-4b3a-aee4-3c43f090750e") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.244535 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.244618 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.244628 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.244642 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.244653 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:32Z","lastTransitionTime":"2026-01-27T20:03:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.246606 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovnkube-controller/1.log" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.250831 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" event={"ID":"25666aca-21d3-4cae-8386-90aaaebd1a52","Type":"ContainerStarted","Data":"b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd"} Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.250895 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" event={"ID":"25666aca-21d3-4cae-8386-90aaaebd1a52","Type":"ContainerStarted","Data":"6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b"} Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.250906 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" event={"ID":"25666aca-21d3-4cae-8386-90aaaebd1a52","Type":"ContainerStarted","Data":"6181b403cdd9c7ed70e4ca229d959a2c19de33d21368b093dd5c69752fb85f59"} Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.265088 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.277808 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 
20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.290005 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.301862 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.315253 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.331727 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"message\\\":\\\"7 20:03:28.843374 6020 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:28.843400 6020 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0127 20:03:28.843407 6020 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0127 20:03:28.843430 6020 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:28.843440 6020 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:28.843445 6020 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:28.843453 6020 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:28.843458 6020 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:28.843459 6020 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0127 20:03:28.843487 6020 factory.go:656] Stopping watch factory\\\\nI0127 20:03:28.843490 6020 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:28.843500 6020 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:28.843503 6020 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:28.843510 6020 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\":656] Stopping watch factory\\\\nI0127 20:03:30.292708 6227 ovnkube.go:599] Stopped ovnkube\\\\nI0127 20:03:30.292737 6227 handler.go:208] Removed *v1.EgressFirewall event handler 
9\\\\nI0127 20:03:30.292789 6227 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0127 20:03:30.292939 6227 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z]\\\\nI0127 20:03:30.292954 6227 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress/router-internal-default\\\\\\\"}\\\\nI0127 20:03:30.292968 6227 services_controller.go:360] Finished syncing service router-internal-default on namespace openshift-ingress for network=default : 2.452521ms\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.343429 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.347213 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.347267 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.347281 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.347301 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.347314 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:32Z","lastTransitionTime":"2026-01-27T20:03:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.354062 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.364946 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.373983 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.384755 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.394105 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.406688 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.424802 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571a
a215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.436903 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.449371 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.449443 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.449451 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.449465 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.449475 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:32Z","lastTransitionTime":"2026-01-27T20:03:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.452514 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.464800 4793 
status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.551832 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.551873 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.551881 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.551895 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.551908 4793 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:32Z","lastTransitionTime":"2026-01-27T20:03:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.587149 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.587278 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:03:48.587250563 +0000 UTC m=+53.977503719 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.587350 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.587372 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.587400 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.587423 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.587533 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.587601 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.587613 4793 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.587630 4793 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.587540 4793 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.587736 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.587665 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:48.587648392 +0000 UTC m=+53.977901618 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.587784 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.587804 4793 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.587826 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:48.587785487 +0000 UTC m=+53.978038713 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.587845 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:48.587835858 +0000 UTC m=+53.978089104 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.587881 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-27 20:03:48.587855618 +0000 UTC m=+53.978108834 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.613968 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.614011 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.614053 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.614069 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.614080 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:32Z","lastTransitionTime":"2026-01-27T20:03:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.625930 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.632449 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.632493 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.632511 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.632527 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.632536 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:32Z","lastTransitionTime":"2026-01-27T20:03:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.645518 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.648668 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.648708 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.648718 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.648732 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.648742 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:32Z","lastTransitionTime":"2026-01-27T20:03:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.660138 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.663507 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.663530 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.663538 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.663566 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.663576 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:32Z","lastTransitionTime":"2026-01-27T20:03:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.676865 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.679972 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.679995 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.680003 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.680015 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.680023 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:32Z","lastTransitionTime":"2026-01-27T20:03:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.691346 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:32Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.691488 4793 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.693000 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.693038 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.693050 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.693067 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.693960 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:32Z","lastTransitionTime":"2026-01-27T20:03:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.797645 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.797720 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.797738 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.797762 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.797779 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:32Z","lastTransitionTime":"2026-01-27T20:03:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.802924 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.803058 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.802928 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:32 crc kubenswrapper[4793]: E0127 20:03:32.803156 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.899989 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.900056 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.900079 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.900105 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:32 crc kubenswrapper[4793]: I0127 20:03:32.900139 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:32Z","lastTransitionTime":"2026-01-27T20:03:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.003714 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.003784 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.003800 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.003820 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.003835 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:33Z","lastTransitionTime":"2026-01-27T20:03:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.107078 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.107134 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.107143 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.107161 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.107180 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:33Z","lastTransitionTime":"2026-01-27T20:03:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.184761 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 23:29:38.212861615 +0000 UTC Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.193603 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs\") pod \"network-metrics-daemon-gsrf9\" (UID: \"93412db5-52e2-4b3a-aee4-3c43f090750e\") " pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:33 crc kubenswrapper[4793]: E0127 20:03:33.193799 4793 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 20:03:33 crc kubenswrapper[4793]: E0127 20:03:33.193912 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs podName:93412db5-52e2-4b3a-aee4-3c43f090750e nodeName:}" failed. No retries permitted until 2026-01-27 20:03:35.193880237 +0000 UTC m=+40.584133433 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs") pod "network-metrics-daemon-gsrf9" (UID: "93412db5-52e2-4b3a-aee4-3c43f090750e") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.210022 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.210066 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.210078 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.210093 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.210103 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:33Z","lastTransitionTime":"2026-01-27T20:03:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.312006 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.312041 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.312050 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.312064 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.312072 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:33Z","lastTransitionTime":"2026-01-27T20:03:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.414876 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.414921 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.414932 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.414958 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.414969 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:33Z","lastTransitionTime":"2026-01-27T20:03:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.517368 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.517416 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.517430 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.517468 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.517479 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:33Z","lastTransitionTime":"2026-01-27T20:03:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.620304 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.620709 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.620721 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.620735 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.620744 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:33Z","lastTransitionTime":"2026-01-27T20:03:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.724274 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.724598 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.724668 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.724739 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.724870 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:33Z","lastTransitionTime":"2026-01-27T20:03:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.803085 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:33 crc kubenswrapper[4793]: E0127 20:03:33.803315 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.803442 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:33 crc kubenswrapper[4793]: E0127 20:03:33.803668 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.827516 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.827737 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.827759 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.827781 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.827797 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:33Z","lastTransitionTime":"2026-01-27T20:03:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.931332 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.931388 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.931405 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.931427 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:33 crc kubenswrapper[4793]: I0127 20:03:33.931440 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:33Z","lastTransitionTime":"2026-01-27T20:03:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.034669 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.034717 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.034729 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.034745 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.034775 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:34Z","lastTransitionTime":"2026-01-27T20:03:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.137363 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.137408 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.137419 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.137437 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.137448 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:34Z","lastTransitionTime":"2026-01-27T20:03:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.185508 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 02:43:06.883159175 +0000 UTC Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.239986 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.240023 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.240032 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.240045 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.240057 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:34Z","lastTransitionTime":"2026-01-27T20:03:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.342140 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.342211 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.342233 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.342262 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.342291 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:34Z","lastTransitionTime":"2026-01-27T20:03:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.445153 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.445198 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.445214 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.445234 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.445251 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:34Z","lastTransitionTime":"2026-01-27T20:03:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.547359 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.547391 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.547400 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.547413 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.547422 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:34Z","lastTransitionTime":"2026-01-27T20:03:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.649691 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.649740 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.649757 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.649780 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.649797 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:34Z","lastTransitionTime":"2026-01-27T20:03:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.751778 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.751812 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.751821 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.751835 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.751846 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:34Z","lastTransitionTime":"2026-01-27T20:03:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.802739 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:34 crc kubenswrapper[4793]: E0127 20:03:34.802905 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.802742 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:34 crc kubenswrapper[4793]: E0127 20:03:34.803028 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.854169 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.854219 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.854232 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.854250 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.854263 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:34Z","lastTransitionTime":"2026-01-27T20:03:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.957734 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.957797 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.957819 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.957846 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:34 crc kubenswrapper[4793]: I0127 20:03:34.957870 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:34Z","lastTransitionTime":"2026-01-27T20:03:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.061045 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.061079 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.061089 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.061101 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.061111 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:35Z","lastTransitionTime":"2026-01-27T20:03:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.164133 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.164752 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.164836 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.164930 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.165007 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:35Z","lastTransitionTime":"2026-01-27T20:03:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.186488 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 04:31:09.29174379 +0000 UTC Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.210506 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs\") pod \"network-metrics-daemon-gsrf9\" (UID: \"93412db5-52e2-4b3a-aee4-3c43f090750e\") " pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:35 crc kubenswrapper[4793]: E0127 20:03:35.211402 4793 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 20:03:35 crc kubenswrapper[4793]: E0127 20:03:35.211519 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs podName:93412db5-52e2-4b3a-aee4-3c43f090750e nodeName:}" failed. 
No retries permitted until 2026-01-27 20:03:39.211489542 +0000 UTC m=+44.601742728 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs") pod "network-metrics-daemon-gsrf9" (UID: "93412db5-52e2-4b3a-aee4-3c43f090750e") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.268227 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.268475 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.268540 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.268641 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.268864 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:35Z","lastTransitionTime":"2026-01-27T20:03:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.371537 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.371804 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.371890 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.371975 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.372106 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:35Z","lastTransitionTime":"2026-01-27T20:03:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.474928 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.475000 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.475022 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.475052 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.475075 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:35Z","lastTransitionTime":"2026-01-27T20:03:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.577384 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.577440 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.577455 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.577478 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.577493 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:35Z","lastTransitionTime":"2026-01-27T20:03:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.679220 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.679261 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.679270 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.679284 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.679294 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:35Z","lastTransitionTime":"2026-01-27T20:03:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.782134 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.782183 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.782200 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.782223 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.782239 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:35Z","lastTransitionTime":"2026-01-27T20:03:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.802730 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:35 crc kubenswrapper[4793]: E0127 20:03:35.802946 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.803465 4793 scope.go:117] "RemoveContainer" containerID="c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.803683 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:35 crc kubenswrapper[4793]: E0127 20:03:35.803776 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.821208 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:35Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.850417 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"message\\\":\\\"7 20:03:28.843374 6020 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:28.843400 6020 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0127 20:03:28.843407 6020 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0127 20:03:28.843430 6020 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:28.843440 6020 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:28.843445 6020 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:28.843453 6020 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:28.843458 6020 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:28.843459 6020 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0127 20:03:28.843487 6020 factory.go:656] Stopping watch factory\\\\nI0127 20:03:28.843490 6020 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:28.843500 6020 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:28.843503 6020 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:28.843510 6020 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\":656] Stopping watch factory\\\\nI0127 20:03:30.292708 6227 ovnkube.go:599] Stopped ovnkube\\\\nI0127 20:03:30.292737 6227 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:30.292789 6227 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0127 20:03:30.292939 6227 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or 
is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z]\\\\nI0127 20:03:30.292954 6227 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress/router-internal-default\\\\\\\"}\\\\nI0127 20:03:30.292968 6227 services_controller.go:360] Finished syncing service router-internal-default on namespace openshift-ingress for network=default : 2.452521ms\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\
\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:35Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.864232 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:35Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.874650 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:35Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:35 crc 
kubenswrapper[4793]: I0127 20:03:35.886991 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runnin
g\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:35Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.888151 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.888192 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.888202 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.888249 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.888263 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:35Z","lastTransitionTime":"2026-01-27T20:03:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.899069 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:35Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.909067 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:35Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.926157 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:35Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.937745 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:35Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.951980 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:35Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.971726 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571a
a215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:35Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.983985 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readO
nly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:35Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.989970 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.990026 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.990038 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.990054 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.990066 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:35Z","lastTransitionTime":"2026-01-27T20:03:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:35 crc kubenswrapper[4793]: I0127 20:03:35.996859 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:35Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.010361 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.023141 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.034337 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.047974 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.092219 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.092263 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.092272 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.092288 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.092297 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:36Z","lastTransitionTime":"2026-01-27T20:03:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.186747 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 10:16:02.683013373 +0000 UTC Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.194275 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.194306 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.194315 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.194327 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.194336 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:36Z","lastTransitionTime":"2026-01-27T20:03:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.268005 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.270285 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23"} Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.270851 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.283783 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.296950 4793 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.296996 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.297007 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.297026 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.297038 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:36Z","lastTransitionTime":"2026-01-27T20:03:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.301520 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.321875 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountP
ath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-
01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.333768 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.348357 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.357534 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.367638 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.377838 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.386360 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.398859 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.399178 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.399213 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.399223 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.399247 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.399258 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:36Z","lastTransitionTime":"2026-01-27T20:03:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.413837 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.431801 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.443307 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.453968 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.466386 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.484503 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebc
e959a1d832f998f854b8d684\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"message\\\":\\\"7 20:03:28.843374 6020 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:28.843400 6020 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0127 20:03:28.843407 6020 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0127 20:03:28.843430 6020 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:28.843440 6020 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:28.843445 6020 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:28.843453 6020 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:28.843458 6020 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:28.843459 6020 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0127 20:03:28.843487 6020 factory.go:656] Stopping watch factory\\\\nI0127 20:03:28.843490 6020 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:28.843500 6020 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:28.843503 6020 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:28.843510 6020 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\":656] Stopping watch factory\\\\nI0127 20:03:30.292708 6227 ovnkube.go:599] Stopped ovnkube\\\\nI0127 20:03:30.292737 6227 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:30.292789 6227 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0127 20:03:30.292939 6227 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z]\\\\nI0127 20:03:30.292954 6227 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress/router-internal-default\\\\\\\"}\\\\nI0127 20:03:30.292968 6227 services_controller.go:360] Finished syncing service router-internal-default on namespace openshift-ingress for network=default : 2.452521ms\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"host
IPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.506695 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:36Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.507777 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.507808 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.507818 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.507834 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.507845 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:36Z","lastTransitionTime":"2026-01-27T20:03:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.609595 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.609613 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.609621 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.609632 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.609639 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:36Z","lastTransitionTime":"2026-01-27T20:03:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.712009 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.712051 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.712062 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.712077 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.712086 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:36Z","lastTransitionTime":"2026-01-27T20:03:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.802525 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.802525 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:36 crc kubenswrapper[4793]: E0127 20:03:36.802980 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:03:36 crc kubenswrapper[4793]: E0127 20:03:36.803154 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.814122 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.814171 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.814187 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.814208 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.814224 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:36Z","lastTransitionTime":"2026-01-27T20:03:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.916885 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.916951 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.916969 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.916991 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:36 crc kubenswrapper[4793]: I0127 20:03:36.917008 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:36Z","lastTransitionTime":"2026-01-27T20:03:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.107120 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.107205 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.107218 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.107234 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.107245 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:37Z","lastTransitionTime":"2026-01-27T20:03:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.187288 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 19:00:59.759766045 +0000 UTC Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.209283 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.209323 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.209331 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.209348 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.209358 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:37Z","lastTransitionTime":"2026-01-27T20:03:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.311824 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.311866 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.311877 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.311894 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.311909 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:37Z","lastTransitionTime":"2026-01-27T20:03:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.415128 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.415173 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.415183 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.415198 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.415210 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:37Z","lastTransitionTime":"2026-01-27T20:03:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.517989 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.518067 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.518078 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.518091 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.518102 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:37Z","lastTransitionTime":"2026-01-27T20:03:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.620403 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.620450 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.620461 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.620477 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.620490 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:37Z","lastTransitionTime":"2026-01-27T20:03:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.722279 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.722313 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.722321 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.722338 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.722348 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:37Z","lastTransitionTime":"2026-01-27T20:03:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.802995 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.803085 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:37 crc kubenswrapper[4793]: E0127 20:03:37.803217 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:03:37 crc kubenswrapper[4793]: E0127 20:03:37.803308 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.824824 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.824873 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.824886 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.824905 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.824916 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:37Z","lastTransitionTime":"2026-01-27T20:03:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.928084 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.928167 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.928206 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.928240 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:37 crc kubenswrapper[4793]: I0127 20:03:37.928263 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:37Z","lastTransitionTime":"2026-01-27T20:03:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.030431 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.030469 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.030499 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.030516 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.030527 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:38Z","lastTransitionTime":"2026-01-27T20:03:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.133574 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.133612 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.133620 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.133633 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.133642 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:38Z","lastTransitionTime":"2026-01-27T20:03:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.187924 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 18:40:52.71915265 +0000 UTC Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.236793 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.236843 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.236860 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.236883 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.236901 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:38Z","lastTransitionTime":"2026-01-27T20:03:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.338894 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.338942 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.338951 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.338965 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.338973 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:38Z","lastTransitionTime":"2026-01-27T20:03:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.445497 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.445584 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.445596 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.445618 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.445631 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:38Z","lastTransitionTime":"2026-01-27T20:03:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.549308 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.549374 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.549390 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.549414 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.549431 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:38Z","lastTransitionTime":"2026-01-27T20:03:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.652305 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.652392 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.652417 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.652449 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.652473 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:38Z","lastTransitionTime":"2026-01-27T20:03:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.755894 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.755930 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.755939 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.755953 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.755962 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:38Z","lastTransitionTime":"2026-01-27T20:03:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.803049 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:38 crc kubenswrapper[4793]: E0127 20:03:38.803212 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.803396 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:38 crc kubenswrapper[4793]: E0127 20:03:38.803691 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.859269 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.859316 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.859329 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.859345 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.859358 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:38Z","lastTransitionTime":"2026-01-27T20:03:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.962271 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.962355 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.962367 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.962388 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:38 crc kubenswrapper[4793]: I0127 20:03:38.962401 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:38Z","lastTransitionTime":"2026-01-27T20:03:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.064849 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.064889 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.064901 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.064916 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.064927 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:39Z","lastTransitionTime":"2026-01-27T20:03:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.168044 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.168104 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.168117 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.168144 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.168156 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:39Z","lastTransitionTime":"2026-01-27T20:03:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.188311 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 17:56:46.250012465 +0000 UTC Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.251874 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs\") pod \"network-metrics-daemon-gsrf9\" (UID: \"93412db5-52e2-4b3a-aee4-3c43f090750e\") " pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:39 crc kubenswrapper[4793]: E0127 20:03:39.252058 4793 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 20:03:39 crc kubenswrapper[4793]: E0127 20:03:39.252149 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs podName:93412db5-52e2-4b3a-aee4-3c43f090750e nodeName:}" failed. 
No retries permitted until 2026-01-27 20:03:47.252127326 +0000 UTC m=+52.642380482 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs") pod "network-metrics-daemon-gsrf9" (UID: "93412db5-52e2-4b3a-aee4-3c43f090750e") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.270359 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.270401 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.270411 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.270429 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.270445 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:39Z","lastTransitionTime":"2026-01-27T20:03:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.373325 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.373379 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.373394 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.373413 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.373425 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:39Z","lastTransitionTime":"2026-01-27T20:03:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.476142 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.476184 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.476197 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.476217 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.476230 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:39Z","lastTransitionTime":"2026-01-27T20:03:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.579715 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.579750 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.579761 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.579777 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.579788 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:39Z","lastTransitionTime":"2026-01-27T20:03:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.682459 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.682516 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.682526 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.682564 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.682578 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:39Z","lastTransitionTime":"2026-01-27T20:03:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.785263 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.785323 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.785338 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.785359 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.785375 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:39Z","lastTransitionTime":"2026-01-27T20:03:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.802872 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.802919 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9"
Jan 27 20:03:39 crc kubenswrapper[4793]: E0127 20:03:39.803006 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 27 20:03:39 crc kubenswrapper[4793]: E0127 20:03:39.803109 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e"
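
The MountVolume.SetUp failure for "metrics-certs" a few entries earlier was parked with "No retries permitted until 2026-01-27 20:03:47 ... (durationBeforeRetry 8s)": rather than retrying failed volume operations in a tight loop, the kubelet re-queues them with an exponentially growing delay. A minimal Go sketch of that doubling-with-a-cap pattern, with illustrative initial and maximum durations (the kubelet's actual constants are not visible in this log):

package main

import (
	"fmt"
	"time"
)

// nextBackoff doubles the delay after each consecutive failure and caps it,
// the shape suggested by the growing durationBeforeRetry values in the log.
// The constants below are illustrative assumptions, not kubelet's settings.
func nextBackoff(prev time.Duration) time.Duration {
	const (
		initial = 500 * time.Millisecond
		maxWait = 2 * time.Minute
	)
	if prev <= 0 {
		return initial
	}
	if next := prev * 2; next < maxWait {
		return next
	}
	return maxWait
}

func main() {
	var delay time.Duration
	for attempt := 1; attempt <= 10; attempt++ {
		delay = nextBackoff(delay)
		fmt.Printf("attempt %d failed; no retries permitted for %v\n", attempt, delay)
	}
}
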
pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.888647 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.888730 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.888754 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.888784 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.888810 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:39Z","lastTransitionTime":"2026-01-27T20:03:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.992156 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.992201 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.992214 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.992230 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:39 crc kubenswrapper[4793]: I0127 20:03:39.992241 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:39Z","lastTransitionTime":"2026-01-27T20:03:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.095471 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.095529 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.095540 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.095577 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.095591 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:40Z","lastTransitionTime":"2026-01-27T20:03:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.189246 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 19:36:44.90675664 +0000 UTC Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.198399 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.198440 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.198451 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.198465 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.198474 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:40Z","lastTransitionTime":"2026-01-27T20:03:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.300779 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.300846 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.300864 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.300890 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.300911 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:40Z","lastTransitionTime":"2026-01-27T20:03:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.403280 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.403350 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.403369 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.403395 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.403417 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:40Z","lastTransitionTime":"2026-01-27T20:03:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.506229 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.506298 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.506312 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.506335 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.506353 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:40Z","lastTransitionTime":"2026-01-27T20:03:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.609607 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.609670 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.609685 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.609709 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.609725 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:40Z","lastTransitionTime":"2026-01-27T20:03:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.712952 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.713009 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.713020 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.713039 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.713051 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:40Z","lastTransitionTime":"2026-01-27T20:03:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.802982 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.803103 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 20:03:40 crc kubenswrapper[4793]: E0127 20:03:40.803119 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
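
Every "Node became not ready" heartbeat above carries the same root cause: the container runtime reports NetworkReady=false because no CNI configuration file exists in /etc/kubernetes/cni/net.d/, so the Ready condition stays False and pod sandboxes cannot be created. The check involved is essentially "does the CNI conf directory contain a network config"; a hedged sketch of that kind of probe, following the common libcni file-extension convention rather than the actual CRI code path:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// hasCNIConfig reports whether dir holds at least one CNI network config.
// The extensions follow the usual libcni convention (.conf, .conflist,
// .json); this is an illustrative probe, not the runtime's implementation.
func hasCNIConfig(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
	switch {
	case err != nil:
		fmt.Println("cannot read CNI conf dir:", err)
	case !ok:
		fmt.Println("network not ready: no CNI configuration file found")
	default:
		fmt.Println("CNI configuration present")
	}
}
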
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:40 crc kubenswrapper[4793]: E0127 20:03:40.803317 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.816696 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.816769 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.816786 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.816811 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.816825 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:40Z","lastTransitionTime":"2026-01-27T20:03:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.919884 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.919964 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.919977 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.920003 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:40 crc kubenswrapper[4793]: I0127 20:03:40.920017 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:40Z","lastTransitionTime":"2026-01-27T20:03:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.043077 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.043118 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.043131 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.043148 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.043156 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:41Z","lastTransitionTime":"2026-01-27T20:03:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.145846 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.145891 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.145901 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.145920 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.145931 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:41Z","lastTransitionTime":"2026-01-27T20:03:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.190334 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 09:26:58.839065163 +0000 UTC Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.250903 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.250937 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.250947 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.250961 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.250972 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:41Z","lastTransitionTime":"2026-01-27T20:03:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.353181 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.353237 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.353255 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.353277 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.353292 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:41Z","lastTransitionTime":"2026-01-27T20:03:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.456357 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.456441 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.456468 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.456498 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.456522 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:41Z","lastTransitionTime":"2026-01-27T20:03:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.559619 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.559678 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.559688 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.559705 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.559716 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:41Z","lastTransitionTime":"2026-01-27T20:03:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.662676 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.662721 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.662733 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.662751 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.662764 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:41Z","lastTransitionTime":"2026-01-27T20:03:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.765371 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.765402 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.765410 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.765423 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.765431 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:41Z","lastTransitionTime":"2026-01-27T20:03:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.802231 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9"
Jan 27 20:03:41 crc kubenswrapper[4793]: E0127 20:03:41.802363 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e"
Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.802237 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 20:03:41 crc kubenswrapper[4793]: E0127 20:03:41.802443 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
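
The certificate_manager.go:356 lines are worth a second look: the serving certificate's expiration is fixed at 2026-02-24 05:53:03 UTC, yet each pass logs a different rotation deadline (2026-01-06, 2025-12-07, 2025-11-12, ...). That is expected: the kubelet's certificate manager picks a randomized rotation point part-way through the certificate's validity, so a fleet of nodes does not all rotate at once, and here the jitter is recomputed on each pass. A sketch of that behaviour, with the 70-90% window and the assumed issue date stated as illustrative assumptions:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a randomized point in the certificate's lifetime,
// in the spirit of client-go's certificate manager. The 70%-90% window is
// an assumption for illustration; the log only shows the varying output.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z")
	notBefore := notAfter.Add(-365 * 24 * time.Hour) // assumed issue date
	for i := 0; i < 3; i++ {
		fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter).UTC())
	}
}
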
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.868589 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.868642 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.868654 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.868671 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.868684 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:41Z","lastTransitionTime":"2026-01-27T20:03:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.971292 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.971354 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.971370 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.971394 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:41 crc kubenswrapper[4793]: I0127 20:03:41.971413 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:41Z","lastTransitionTime":"2026-01-27T20:03:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.074172 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.074202 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.074212 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.074225 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.074234 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:42Z","lastTransitionTime":"2026-01-27T20:03:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.176253 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.176302 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.176313 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.176329 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.176343 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:42Z","lastTransitionTime":"2026-01-27T20:03:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.190920 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 14:28:21.54726982 +0000 UTC Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.278714 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.278773 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.278783 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.278800 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.278811 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:42Z","lastTransitionTime":"2026-01-27T20:03:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.381163 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.381202 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.381211 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.381226 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.381237 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:42Z","lastTransitionTime":"2026-01-27T20:03:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.483678 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.483710 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.483717 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.483730 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.483739 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:42Z","lastTransitionTime":"2026-01-27T20:03:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.586272 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.586318 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.586330 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.586347 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.586357 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:42Z","lastTransitionTime":"2026-01-27T20:03:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.688660 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.688730 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.688750 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.688778 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.688798 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:42Z","lastTransitionTime":"2026-01-27T20:03:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.791632 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.791687 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.791698 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.791717 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.791728 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:42Z","lastTransitionTime":"2026-01-27T20:03:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.803154 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.803240 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:03:42 crc kubenswrapper[4793]: E0127 20:03:42.803309 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 27 20:03:42 crc kubenswrapper[4793]: E0127 20:03:42.803384 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
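
The "Error updating node status, will retry" entries just below show why the node's condition never reaches the API server: the status patch is intercepted by the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743, whose TLS certificate expired on 2025-08-24T17:21:41Z, months before the current clock of 2026-01-27. One quick way to confirm such an expiry is to dial the endpoint and inspect the leaf certificate's validity window; a minimal sketch (InsecureSkipVerify is deliberate here, since an expired certificate would otherwise abort the handshake before it could be inspected):

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

// Dial the webhook endpoint named in the log and print the leaf
// certificate's validity window, flagging whether it has expired.
func main() {
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	now := time.Now().UTC()
	fmt.Printf("NotBefore=%s NotAfter=%s now=%s expired=%v\n",
		cert.NotBefore.UTC(), cert.NotAfter.UTC(), now, now.After(cert.NotAfter))
}
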
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.891623 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.891695 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.891707 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.891720 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.891729 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:42Z","lastTransitionTime":"2026-01-27T20:03:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:42 crc kubenswrapper[4793]: E0127 20:03:42.904615 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:42Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.909420 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.909467 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.909476 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.909492 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.909504 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:42Z","lastTransitionTime":"2026-01-27T20:03:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:42 crc kubenswrapper[4793]: E0127 20:03:42.926275 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:42Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.930287 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.930352 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.930373 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.930407 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.930432 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:42Z","lastTransitionTime":"2026-01-27T20:03:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:42 crc kubenswrapper[4793]: E0127 20:03:42.947435 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:42Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.952165 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.952234 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.952258 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.952286 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.952306 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:42Z","lastTransitionTime":"2026-01-27T20:03:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:42 crc kubenswrapper[4793]: E0127 20:03:42.970322 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:42Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.975501 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.975533 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.975557 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.975574 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.975584 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:42Z","lastTransitionTime":"2026-01-27T20:03:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:42 crc kubenswrapper[4793]: E0127 20:03:42.990626 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:42Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:42 crc kubenswrapper[4793]: E0127 20:03:42.990848 4793 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.992421 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.992445 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.992453 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.992465 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:42 crc kubenswrapper[4793]: I0127 20:03:42.992472 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:42Z","lastTransitionTime":"2026-01-27T20:03:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.095537 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.095684 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.095716 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.095744 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.095765 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:43Z","lastTransitionTime":"2026-01-27T20:03:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.095765 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:43Z","lastTransitionTime":"2026-01-27T20:03:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.191138 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 02:43:40.767854422 +0000 UTC
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.197700 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.197732 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.197743 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.197759 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.197772 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:43Z","lastTransitionTime":"2026-01-27T20:03:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.300275 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.300320 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.300330 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.300347 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
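The certificate_manager lines report the same expiration but a different rotation deadline on each pass, because client-go recomputes the deadline with jitter. A sketch under the assumption of the commonly described 70-90% lifetime window (the exact fractions live in client-go's certificate manager, so treat the numbers as illustrative):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a random point in roughly the 70-90% span of the
// certificate's lifetime (assumed jitter window, not the authoritative one).
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	// Expiration taken from the log; NotBefore is a stand-in value.
	notBefore := time.Date(2025, time.November, 26, 5, 53, 3, 0, time.UTC)
	notAfter := time.Date(2026, time.February, 24, 5, 53, 3, 0, time.UTC)
	// Two calls yield two different deadlines, matching the log's behavior.
	for i := 0; i < 2; i++ {
		fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
	}
}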
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.300361 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:43Z","lastTransitionTime":"2026-01-27T20:03:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.402362 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.402413 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.402426 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.402445 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.402461 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:43Z","lastTransitionTime":"2026-01-27T20:03:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.505033 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.505072 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.505081 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.505099 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.505113 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:43Z","lastTransitionTime":"2026-01-27T20:03:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.607755 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.607874 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.607887 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.607907 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
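Each setters.go:603 entry embeds the NodeCondition being written to the Node object. A small sketch that decodes one of these payloads; the struct fields mirror the JSON keys visible in the log, and the message string is abridged:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// NodeCondition mirrors the condition fields printed by the setters.go lines.
type NodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Condition payload abridged from the log above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:43Z","lastTransitionTime":"2026-01-27T20:03:43Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`
	var c NodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("node Ready=%s reason=%s since %s\n", c.Status, c.Reason, c.LastTransitionTime)
}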
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.607920 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:43Z","lastTransitionTime":"2026-01-27T20:03:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.710813 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.710899 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.710934 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.710969 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.710993 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:43Z","lastTransitionTime":"2026-01-27T20:03:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.802301 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9"
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.802351 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 20:03:43 crc kubenswrapper[4793]: E0127 20:03:43.802493 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.813274 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.813338 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.813361 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.813382 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.813400 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:43Z","lastTransitionTime":"2026-01-27T20:03:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.915623 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.915658 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.915667 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.915680 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.915689 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:43Z","lastTransitionTime":"2026-01-27T20:03:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 27 20:03:43 crc kubenswrapper[4793]: I0127 20:03:43.915689 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:43Z","lastTransitionTime":"2026-01-27T20:03:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.018175 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.018221 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.018233 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.018250 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.018265 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:44Z","lastTransitionTime":"2026-01-27T20:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.119961 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.120013 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.120022 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.120038 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.120051 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:44Z","lastTransitionTime":"2026-01-27T20:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.191581 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 07:47:32.161868713 +0000 UTC
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.222273 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.222340 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.222363 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.222391 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.222414 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:44Z","lastTransitionTime":"2026-01-27T20:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.324432 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.324493 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.324506 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.324522 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.324532 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:44Z","lastTransitionTime":"2026-01-27T20:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.427255 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.427293 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.427300 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.427313 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.427321 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:44Z","lastTransitionTime":"2026-01-27T20:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.530019 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.530101 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.530112 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.530137 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.530150 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:44Z","lastTransitionTime":"2026-01-27T20:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.625052 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.632028 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.632804 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.632840 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.632869 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.632881 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:44Z","lastTransitionTime":"2026-01-27T20:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.635854 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.651663 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPat
h\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{
\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.673505 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.691636 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.701740 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.713972 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.724644 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.734676 4793 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.734787 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.734799 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.734816 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.734827 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:44Z","lastTransitionTime":"2026-01-27T20:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.740102 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.758233 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/
ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.769996 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.784154 4793 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27
c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.801359 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.802575 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.802653 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:44 crc kubenswrapper[4793]: E0127 20:03:44.802673 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:03:44 crc kubenswrapper[4793]: E0127 20:03:44.802899 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.815013 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.829229 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 
20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.836601 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.836642 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.836651 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.836666 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.836678 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:44Z","lastTransitionTime":"2026-01-27T20:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.841950 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.858657 4793 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32
fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"message\\\":\\\"7 20:03:28.843374 6020 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:28.843400 6020 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0127 20:03:28.843407 6020 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0127 20:03:28.843430 6020 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:28.843440 6020 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:28.843445 6020 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:28.843453 6020 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:28.843458 6020 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:28.843459 6020 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0127 20:03:28.843487 6020 factory.go:656] Stopping watch factory\\\\nI0127 20:03:28.843490 6020 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:28.843500 6020 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:28.843503 6020 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:28.843510 6020 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\":656] Stopping watch factory\\\\nI0127 
20:03:30.292708 6227 ovnkube.go:599] Stopped ovnkube\\\\nI0127 20:03:30.292737 6227 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:30.292789 6227 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0127 20:03:30.292939 6227 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z]\\\\nI0127 20:03:30.292954 6227 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress/router-internal-default\\\\\\\"}\\\\nI0127 20:03:30.292968 6227 services_controller.go:360] Finished syncing service router-internal-default on namespace openshift-ingress for network=default : 2.452521ms\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o
://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.870569 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.882346 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:44Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:44 crc 
kubenswrapper[4793]: I0127 20:03:44.938427 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.938468 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.938478 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.938493 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:44 crc kubenswrapper[4793]: I0127 20:03:44.938503 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:44Z","lastTransitionTime":"2026-01-27T20:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.041025 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.041077 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.041089 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.041106 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.041118 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:45Z","lastTransitionTime":"2026-01-27T20:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.143625 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.143667 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.143680 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.143696 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.143709 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:45Z","lastTransitionTime":"2026-01-27T20:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.191739 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 00:33:56.070833427 +0000 UTC
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.246047 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.246088 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.246096 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.246110 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.246119 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:45Z","lastTransitionTime":"2026-01-27T20:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.348802 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.348844 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.348853 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.348870 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.348883 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:45Z","lastTransitionTime":"2026-01-27T20:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.452106 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.452161 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.452175 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.452195 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.452211 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:45Z","lastTransitionTime":"2026-01-27T20:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.585079 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.585201 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.585217 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.585240 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.585254 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:45Z","lastTransitionTime":"2026-01-27T20:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.687985 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.688050 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.688069 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.688091 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.688108 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:45Z","lastTransitionTime":"2026-01-27T20:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.790413 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.790502 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.790513 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.790529 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.790541 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:45Z","lastTransitionTime":"2026-01-27T20:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.802915 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9"
Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.803064 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 20:03:45 crc kubenswrapper[4793]: E0127 20:03:45.803112 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e"
Jan 27 20:03:45 crc kubenswrapper[4793]: E0127 20:03:45.803217 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.821997 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"s
tartedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de47
7ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:45Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.835873 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:45Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.848491 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:45Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.859645 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:45Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.875128 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:45Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.884901 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:45Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.893595 4793 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.893636 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.893645 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.893659 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.893670 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:45Z","lastTransitionTime":"2026-01-27T20:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.897855 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:45Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.907946 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/
ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:45Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.915991 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:45Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.926472 4793 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27
c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:45Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.935945 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:45Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.945522 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:45Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.954498 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:45Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.964009 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dafc1d4f-e681-4d03-b3cf-b5db3d4fad74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba4d1738f6b64ad1e3c6338180b578ab559e038763880acb005b80ec2b0bde38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49f17253bed9de06fefacec37381e23f24486f77508298e3db45218a50c2a407\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"
started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d37df3fad0cbffb0d04fc9ad6dd2f37186870c68993262c43a150bd38f31242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:45Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.974056 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:45Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.991654 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebc
e959a1d832f998f854b8d684\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8758b483b19c4f6b35ec2c412447bc74de79ad2f89d3b78e8ee26d2f0c930f20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"message\\\":\\\"7 20:03:28.843374 6020 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:28.843400 6020 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0127 20:03:28.843407 6020 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0127 20:03:28.843430 6020 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:28.843440 6020 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:28.843445 6020 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:28.843453 6020 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:28.843458 6020 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:28.843459 6020 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0127 20:03:28.843487 6020 factory.go:656] Stopping watch factory\\\\nI0127 20:03:28.843490 6020 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0127 20:03:28.843474 6020 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:28.843500 6020 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:28.843503 6020 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:28.843510 6020 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\":656] Stopping watch factory\\\\nI0127 20:03:30.292708 6227 ovnkube.go:599] Stopped ovnkube\\\\nI0127 20:03:30.292737 6227 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:30.292789 6227 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0127 20:03:30.292939 6227 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z]\\\\nI0127 20:03:30.292954 6227 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress/router-internal-default\\\\\\\"}\\\\nI0127 20:03:30.292968 6227 services_controller.go:360] Finished syncing service router-internal-default on namespace openshift-ingress for network=default : 2.452521ms\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"host
IPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:45Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.996564 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.996598 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.996606 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.996620 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:45 crc kubenswrapper[4793]: I0127 20:03:45.996629 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:45Z","lastTransitionTime":"2026-01-27T20:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.004614 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:46Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.014651 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:46Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.098740 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.098796 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.098809 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.098827 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.098841 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:46Z","lastTransitionTime":"2026-01-27T20:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.191848 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 00:02:21.533282455 +0000 UTC Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.200647 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.200691 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.200701 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.200717 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.200726 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:46Z","lastTransitionTime":"2026-01-27T20:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.303040 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.303084 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.303095 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.303111 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.303122 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:46Z","lastTransitionTime":"2026-01-27T20:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.405499 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.405538 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.405566 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.405584 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.405597 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:46Z","lastTransitionTime":"2026-01-27T20:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.509179 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.509223 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.509234 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.509250 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.509261 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:46Z","lastTransitionTime":"2026-01-27T20:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.612100 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.612186 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.612199 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.612218 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.612229 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:46Z","lastTransitionTime":"2026-01-27T20:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.719948 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.720065 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.721023 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.721141 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.721161 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:46Z","lastTransitionTime":"2026-01-27T20:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.803179 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.803236 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:46 crc kubenswrapper[4793]: E0127 20:03:46.803315 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:03:46 crc kubenswrapper[4793]: E0127 20:03:46.803785 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.804114 4793 scope.go:117] "RemoveContainer" containerID="7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.818709 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffa
b5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:46Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.824371 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.824408 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.824417 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.824434 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.824444 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:46Z","lastTransitionTime":"2026-01-27T20:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.834275 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:46Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.847313 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:46Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.858963 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:46Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.871983 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:46Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.881871 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:46Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:46 crc 
kubenswrapper[4793]: I0127 20:03:46.894615 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dafc1d4f-e681-4d03-b3cf-b5db3d4fad74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba4d1738f6b64ad1e3c6338180b578ab559e038763880acb005b80ec2b0bde38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49f17253bed9de06fefacec37381e23f24486f77508298e3db45218a50c2a407\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d37df3fad0cbffb0d04fc9ad6dd2f37186870c68993262c43a150bd38f31242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\
\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:46Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.905201 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:46Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.925081 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\":656] Stopping watch factory\\\\nI0127 20:03:30.292708 6227 ovnkube.go:599] Stopped ovnkube\\\\nI0127 20:03:30.292737 6227 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:30.292789 6227 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0127 20:03:30.292939 6227 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z]\\\\nI0127 20:03:30.292954 6227 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress/router-internal-default\\\\\\\"}\\\\nI0127 20:03:30.292968 6227 services_controller.go:360] Finished syncing service router-internal-default on namespace openshift-ingress for network=default : 
2.452521ms\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:29Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-8glmz_openshift-ovn-kubernetes(4fb300f8-bf40-4c4e-a3e5-4d5149177aae)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":tru
e,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:46Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.926711 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.926801 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.926888 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.927038 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.927151 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:46Z","lastTransitionTime":"2026-01-27T20:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.938675 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:46Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.953601 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:46Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.972593 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:46Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:46 crc kubenswrapper[4793]: I0127 20:03:46.996949 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571a
a215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:46Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.011842 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.024205 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.030286 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.030333 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.030347 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.030370 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.030383 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:47Z","lastTransitionTime":"2026-01-27T20:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.036130 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.052794 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
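Between the patch failures the kubelet keeps resetting the node's Ready condition to False with reason KubeletNotReady: the container runtime reports NetworkReady=false because no CNI configuration file exists yet in /etc/kubernetes/cni/net.d/ (OpenShift's conf dir; the upstream default is /etc/cni/net.d). The runtime's probe amounts to checking whether a loadable network config has appeared in that directory; a loose stand-in for that check, not CRI-O's actual implementation:

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // hasCNIConfig mirrors, loosely, what a runtime looks for before it
    // reports NetworkReady=true: at least one CNI config file in the dir.
    func hasCNIConfig(dir string) (bool, error) {
    	entries, err := os.ReadDir(dir)
    	if err != nil {
    		return false, err
    	}
    	for _, e := range entries {
    		switch filepath.Ext(e.Name()) {
    		case ".conf", ".conflist", ".json":
    			return true, nil
    		}
    	}
    	return false, nil
    }

    func main() {
    	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d") // dir from the log
    	fmt.Println(ok, err)
    }

Until the network plugin writes its config there, the node cannot leave NotReady.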
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.064637 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.132668 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.132702 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.132710 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.132724 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.132735 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:47Z","lastTransitionTime":"2026-01-27T20:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.191967 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 17:14:00.611299624 +0000 UTC Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.235632 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.235687 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.235702 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.235717 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.235727 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:47Z","lastTransitionTime":"2026-01-27T20:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.295912 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.306392 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovnkube-controller/1.log" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.309384 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerStarted","Data":"926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20"} Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.310005 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.328720 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.338853 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.338900 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.338920 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.338942 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.338956 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:47Z","lastTransitionTime":"2026-01-27T20:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.340431 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dafc1d4f-e681-4d03-b3cf-b5db3d4fad74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba4d1738f6b64ad1e3c6338180b578ab559e038763880acb005b80ec2b0bde38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49f17253bed9de06fefacec37381e23f24486f77508298e3db45218a50c2a407\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d37df3fad0cbffb0d04fc9ad6dd2f37186870c68993262c43a150bd38f31242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.344930 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs\") pod \"network-metrics-daemon-gsrf9\" (UID: \"93412db5-52e2-4b3a-aee4-3c43f090750e\") " pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:47 crc kubenswrapper[4793]: E0127 20:03:47.345064 4793 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 20:03:47 crc kubenswrapper[4793]: E0127 20:03:47.345113 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs podName:93412db5-52e2-4b3a-aee4-3c43f090750e nodeName:}" failed. No retries permitted until 2026-01-27 20:04:03.34509765 +0000 UTC m=+68.735350816 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs") pod "network-metrics-daemon-gsrf9" (UID: "93412db5-52e2-4b3a-aee4-3c43f090750e") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.353613 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.370571 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status 
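A few records back, the metrics-certs mount for network-metrics-daemon-gsrf9 fails for a different reason: the kubelet's secret manager has no object registered for openshift-multus/metrics-daemon-secret, which is typical right after a kubelet restart before pod re-registration repopulates the watch-based cache, and the retry is pushed out by the volume manager's per-operation exponential backoff (durationBeforeRetry 16s, consistent with a sub-second initial delay doubling across several consecutive failures). The doubling-with-cap pattern, sketched generically rather than with kubelet's exact constants:

    package main

    import (
    	"fmt"
    	"time"
    )

    // nextDelay doubles the previous delay up to a cap; a generic sketch
    // of per-operation retry backoff, not kubelet's exact constants.
    func nextDelay(prev, initial, max time.Duration) time.Duration {
    	if prev == 0 {
    		return initial
    	}
    	d := prev * 2
    	if d > max {
    		return max
    	}
    	return d
    }

    func main() {
    	d := time.Duration(0)
    	for i := 0; i < 8; i++ {
    		d = nextDelay(d, 500*time.Millisecond, 2*time.Minute)
    		fmt.Printf("failure %d -> retry in %s\n", i+1, d)
    	}
    }

With a 500ms initial delay, the sixth consecutive failure lands on exactly the 16s seen here.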
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\":656] Stopping watch factory\\\\nI0127 20:03:30.292708 6227 ovnkube.go:599] Stopped ovnkube\\\\nI0127 20:03:30.292737 6227 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:30.292789 6227 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0127 20:03:30.292939 6227 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z]\\\\nI0127 20:03:30.292954 6227 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress/router-internal-default\\\\\\\"}\\\\nI0127 20:03:30.292968 6227 services_controller.go:360] Finished syncing service router-internal-default on namespace openshift-ingress for network=default : 
2.452521ms\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:29Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatus
es\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.386729 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.399158 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.420139 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\
",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"
containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.440195 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"re
ady\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\
\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.441879 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.441905 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.441913 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.441926 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.441935 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:47Z","lastTransitionTime":"2026-01-27T20:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.456939 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.471232 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.480134 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.489854 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.500741 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.510175 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.524381 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.538805 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.543811 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.543848 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.543857 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.543870 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.543881 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:47Z","lastTransitionTime":"2026-01-27T20:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.555057 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.568653 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:47Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.646458 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.646492 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.646503 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.646519 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.646533 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:47Z","lastTransitionTime":"2026-01-27T20:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.751228 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.751267 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.751276 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.751289 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.751298 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:47Z","lastTransitionTime":"2026-01-27T20:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.802648 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.802737 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 20:03:47 crc kubenswrapper[4793]: E0127 20:03:47.802814 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e"
Jan 27 20:03:47 crc kubenswrapper[4793]: E0127 20:03:47.802910 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.853007 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.853048 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.853057 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.853073 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.853086 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:47Z","lastTransitionTime":"2026-01-27T20:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.955660 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.955705 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.955715 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.955728 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:47 crc kubenswrapper[4793]: I0127 20:03:47.955739 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:47Z","lastTransitionTime":"2026-01-27T20:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.058583 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.058630 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.058643 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.058659 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.058671 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:48Z","lastTransitionTime":"2026-01-27T20:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.161176 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.161210 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.161224 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.161238 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.161247 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:48Z","lastTransitionTime":"2026-01-27T20:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.192830 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 16:31:13.71856169 +0000 UTC
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.264401 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.264447 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.264457 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.264473 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.264485 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:48Z","lastTransitionTime":"2026-01-27T20:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.367472 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.367513 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.367526 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.367554 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.367564 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:48Z","lastTransitionTime":"2026-01-27T20:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.470603 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.470653 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.470664 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.470683 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.470695 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:48Z","lastTransitionTime":"2026-01-27T20:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.573768 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.573804 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.573812 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.573828 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.573840 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:48Z","lastTransitionTime":"2026-01-27T20:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.656688 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.656819 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.656865 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.656917 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.656951 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:03:48 crc kubenswrapper[4793]: E0127 20:03:48.657069 4793 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 27 20:03:48 crc kubenswrapper[4793]: E0127 20:03:48.657183 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 20:04:20.657159134 +0000 UTC m=+86.047412300 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 27 20:03:48 crc kubenswrapper[4793]: E0127 20:03:48.657079 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 27 20:03:48 crc kubenswrapper[4793]: E0127 20:03:48.657286 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 27 20:03:48 crc kubenswrapper[4793]: E0127 20:03:48.657279 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 27 20:03:48 crc kubenswrapper[4793]: E0127 20:03:48.657374 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 27 20:03:48 crc kubenswrapper[4793]: E0127 20:03:48.657390 4793 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 27 20:03:48 crc kubenswrapper[4793]: E0127 20:03:48.657309 4793 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 27 20:03:48 crc kubenswrapper[4793]: E0127 20:03:48.657473 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-27 20:04:20.657446871 +0000 UTC m=+86.047700027 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 27 20:03:48 crc kubenswrapper[4793]: E0127 20:03:48.657517 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-27 20:04:20.657502382 +0000 UTC m=+86.047755758 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 27 20:03:48 crc kubenswrapper[4793]: E0127 20:03:48.657089 4793 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 27 20:03:48 crc kubenswrapper[4793]: E0127 20:03:48.657587 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 20:04:20.657577784 +0000 UTC m=+86.047831190 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 27 20:03:48 crc kubenswrapper[4793]: E0127 20:03:48.657931 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:04:20.657893941 +0000 UTC m=+86.048147097 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.676211 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.676249 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.676260 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.676274 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.676283 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:48Z","lastTransitionTime":"2026-01-27T20:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.778570 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.778610 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.778619 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.778633 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.778642 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:48Z","lastTransitionTime":"2026-01-27T20:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.802882 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.802927 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:03:48 crc kubenswrapper[4793]: E0127 20:03:48.803189 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 27 20:03:48 crc kubenswrapper[4793]: E0127 20:03:48.803301 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.881158 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.881214 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.881227 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.881247 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.881261 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:48Z","lastTransitionTime":"2026-01-27T20:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.985059 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.985105 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.985113 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.985126 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:48 crc kubenswrapper[4793]: I0127 20:03:48.985137 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:48Z","lastTransitionTime":"2026-01-27T20:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.087609 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.087644 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.087652 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.087664 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.087672 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:49Z","lastTransitionTime":"2026-01-27T20:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.190210 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.190250 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.190259 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.190272 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.190282 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:49Z","lastTransitionTime":"2026-01-27T20:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.193404 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 19:55:47.838155639 +0000 UTC
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.292303 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.292356 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.292368 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.292385 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.292399 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:49Z","lastTransitionTime":"2026-01-27T20:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.317824 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovnkube-controller/2.log"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.318661 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovnkube-controller/1.log"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.320928 4793 generic.go:334] "Generic (PLEG): container finished" podID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerID="926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20" exitCode=1
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.320968 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerDied","Data":"926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20"}
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.320999 4793 scope.go:117] "RemoveContainer" containerID="7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.321869 4793 scope.go:117] "RemoveContainer" containerID="926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20"
Jan 27 20:03:49 crc kubenswrapper[4793]: E0127 20:03:49.322056 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-8glmz_openshift-ovn-kubernetes(4fb300f8-bf40-4c4e-a3e5-4d5149177aae)\"" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.341800 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.352881 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.366232 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.379572 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.392658 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.394857 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.394902 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.394915 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.394933 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.394951 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:49Z","lastTransitionTime":"2026-01-27T20:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.408292 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.418668 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.434106 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7
dfc568df78d15ea825fd9e20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\":656] Stopping watch factory\\\\nI0127 20:03:30.292708 6227 ovnkube.go:599] Stopped ovnkube\\\\nI0127 20:03:30.292737 6227 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:30.292789 6227 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0127 20:03:30.292939 6227 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z]\\\\nI0127 20:03:30.292954 6227 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress/router-internal-default\\\\\\\"}\\\\nI0127 20:03:30.292968 6227 services_controller.go:360] Finished syncing service router-internal-default on namespace openshift-ingress for network=default : 2.452521ms\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:29Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:48Z\\\",\\\"message\\\":\\\"all/v1/apis/informers/externalversions/factory.go:140\\\\nI0127 20:03:48.910920 6456 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0127 20:03:48.910963 6456 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:48.910970 6456 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:48.910984 6456 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:48.910991 6456 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:48.910996 6456 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:48.911052 6456 handler.go:208] Removed *v1.Node event handler 7\\\\nI0127 20:03:48.911039 6456 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0127 20:03:48.911068 6456 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:48.911079 6456 handler.go:190] Sending *v1.EgressFirewall event 
handler 9 for removal\\\\nI0127 20:03:48.911090 6456 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0127 20:03:48.911096 6456 factory.go:656] Stopping watch factory\\\\nI0127 20:03:48.911104 6456 handler.go:208] Removed *v1.EgressFirewall ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\"
:\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.445594 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.453741 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:49 crc 
kubenswrapper[4793]: I0127 20:03:49.462826 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dafc1d4f-e681-4d03-b3cf-b5db3d4fad74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba4d1738f6b64ad1e3c6338180b578ab559e038763880acb005b80ec2b0bde38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49f17253bed9de06fefacec37381e23f24486f77508298e3db45218a50c2a407\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d37df3fad0cbffb0d04fc9ad6dd2f37186870c68993262c43a150bd38f31242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\
\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.475950 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.484585 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.495401 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.496596 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.496641 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.496651 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.496668 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.496677 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:49Z","lastTransitionTime":"2026-01-27T20:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.506109 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.520150 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 
2025-08-24T17:21:41Z" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.537720 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/
etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026
-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.551321 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:49Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.599274 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.599350 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.599366 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.599383 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.599397 4793 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:49Z","lastTransitionTime":"2026-01-27T20:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.701950 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.702086 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.702105 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.702128 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.702143 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:49Z","lastTransitionTime":"2026-01-27T20:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.802345 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.802455 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:49 crc kubenswrapper[4793]: E0127 20:03:49.802659 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:03:49 crc kubenswrapper[4793]: E0127 20:03:49.802948 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.804976 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.805042 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.805054 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.805068 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.805081 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:49Z","lastTransitionTime":"2026-01-27T20:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.908034 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.908098 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.908113 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.908141 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:49 crc kubenswrapper[4793]: I0127 20:03:49.908157 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:49Z","lastTransitionTime":"2026-01-27T20:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.010478 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.010536 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.010571 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.010590 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.010599 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:50Z","lastTransitionTime":"2026-01-27T20:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.113298 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.113343 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.113352 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.113369 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.113381 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:50Z","lastTransitionTime":"2026-01-27T20:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.194040 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 12:23:09.792954526 +0000 UTC Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.215678 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.215758 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.215773 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.215804 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.215820 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:50Z","lastTransitionTime":"2026-01-27T20:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.320228 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.320288 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.320301 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.320321 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.320334 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:50Z","lastTransitionTime":"2026-01-27T20:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.330022 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovnkube-controller/2.log" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.422885 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.422940 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.422952 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.422965 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.422974 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:50Z","lastTransitionTime":"2026-01-27T20:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.525413 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.525453 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.525465 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.525482 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.525494 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:50Z","lastTransitionTime":"2026-01-27T20:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.628763 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.628865 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.628885 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.628909 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.628927 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:50Z","lastTransitionTime":"2026-01-27T20:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.732753 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.732797 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.732807 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.732824 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.732834 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:50Z","lastTransitionTime":"2026-01-27T20:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.802477 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.802478 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:50 crc kubenswrapper[4793]: E0127 20:03:50.802706 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:50 crc kubenswrapper[4793]: E0127 20:03:50.802856 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.835527 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.835693 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.835728 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.835803 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.835830 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:50Z","lastTransitionTime":"2026-01-27T20:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.938762 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.938809 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.938820 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.938838 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:50 crc kubenswrapper[4793]: I0127 20:03:50.938850 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:50Z","lastTransitionTime":"2026-01-27T20:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.041934 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.041991 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.042031 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.042070 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.042116 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:51Z","lastTransitionTime":"2026-01-27T20:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.145293 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.145340 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.145350 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.145365 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.145374 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:51Z","lastTransitionTime":"2026-01-27T20:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.194623 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 03:18:24.342787818 +0000 UTC Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.248192 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.248234 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.248246 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.248261 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.248272 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:51Z","lastTransitionTime":"2026-01-27T20:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.350925 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.350960 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.350969 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.350987 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.350996 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:51Z","lastTransitionTime":"2026-01-27T20:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.452966 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.453004 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.453013 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.453028 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.453037 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:51Z","lastTransitionTime":"2026-01-27T20:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.555485 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.555542 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.555586 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.555606 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.555621 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:51Z","lastTransitionTime":"2026-01-27T20:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.658414 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.658455 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.658467 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.658484 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.658498 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:51Z","lastTransitionTime":"2026-01-27T20:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.761137 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.761195 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.761208 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.761229 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.761244 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:51Z","lastTransitionTime":"2026-01-27T20:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.803153 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.803193 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:51 crc kubenswrapper[4793]: E0127 20:03:51.803324 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:51 crc kubenswrapper[4793]: E0127 20:03:51.803437 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.863679 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.863746 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.863768 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.863796 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.863817 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:51Z","lastTransitionTime":"2026-01-27T20:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.966741 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.966804 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.966814 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.966834 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:51 crc kubenswrapper[4793]: I0127 20:03:51.966854 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:51Z","lastTransitionTime":"2026-01-27T20:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.071727 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.071782 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.071795 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.071814 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.071827 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:52Z","lastTransitionTime":"2026-01-27T20:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.174407 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.174453 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.174467 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.174483 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.174494 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:52Z","lastTransitionTime":"2026-01-27T20:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.195809 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 02:32:37.141738787 +0000 UTC
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.277383 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.277440 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.277457 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.277473 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.277485 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:52Z","lastTransitionTime":"2026-01-27T20:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.380775 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.380878 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.380895 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.380922 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.380940 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:52Z","lastTransitionTime":"2026-01-27T20:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.484069 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.484117 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.484135 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.484152 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.484163 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:52Z","lastTransitionTime":"2026-01-27T20:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.586426 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.586511 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.586523 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.586585 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.586600 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:52Z","lastTransitionTime":"2026-01-27T20:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.689302 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.689353 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.689366 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.689384 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.689396 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:52Z","lastTransitionTime":"2026-01-27T20:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.792229 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.792275 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.792284 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.792297 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.792307 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:52Z","lastTransitionTime":"2026-01-27T20:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.802692 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.802709 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:03:52 crc kubenswrapper[4793]: E0127 20:03:52.802831 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 27 20:03:52 crc kubenswrapper[4793]: E0127 20:03:52.802903 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.894593 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.894657 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.894669 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.894688 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.894718 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:52Z","lastTransitionTime":"2026-01-27T20:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.997407 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.997447 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.997461 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.997479 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:52 crc kubenswrapper[4793]: I0127 20:03:52.997491 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:52Z","lastTransitionTime":"2026-01-27T20:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.100341 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.100388 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.100398 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.100414 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.100427 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:53Z","lastTransitionTime":"2026-01-27T20:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.130668 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.136464 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.136491 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.136509 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.136527 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.136536 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:53Z","lastTransitionTime":"2026-01-27T20:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.147344 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: E0127 20:03:53.153407 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.157834 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.157932 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.157991 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.158080 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.158095 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.158148 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:53Z","lastTransitionTime":"2026-01-27T20:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:53 crc kubenswrapper[4793]: E0127 20:03:53.169919 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.171883 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 
20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.174179 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.174353 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.174414 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.174482 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.174538 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:53Z","lastTransitionTime":"2026-01-27T20:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.189425 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: E0127 20:03:53.191905 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.195939 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 11:04:33.226675484 +0000 UTC Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.196113 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.196140 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.196151 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.196169 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.196181 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:53Z","lastTransitionTime":"2026-01-27T20:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.202920 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: E0127 20:03:53.212109 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.216612 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.216647 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.216658 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.216677 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.216688 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:53Z","lastTransitionTime":"2026-01-27T20:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.217952 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: E0127 20:03:53.230201 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: E0127 20:03:53.230308 4793 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.231869 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.231900 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.231914 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.231930 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.231942 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:53Z","lastTransitionTime":"2026-01-27T20:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.239471 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7
dfc568df78d15ea825fd9e20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\":656] Stopping watch factory\\\\nI0127 20:03:30.292708 6227 ovnkube.go:599] Stopped ovnkube\\\\nI0127 20:03:30.292737 6227 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:30.292789 6227 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0127 20:03:30.292939 6227 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z]\\\\nI0127 20:03:30.292954 6227 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress/router-internal-default\\\\\\\"}\\\\nI0127 20:03:30.292968 6227 services_controller.go:360] Finished syncing service router-internal-default on namespace openshift-ingress for network=default : 2.452521ms\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:29Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:48Z\\\",\\\"message\\\":\\\"all/v1/apis/informers/externalversions/factory.go:140\\\\nI0127 20:03:48.910920 6456 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0127 20:03:48.910963 6456 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:48.910970 6456 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:48.910984 6456 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:48.910991 6456 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:48.910996 6456 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:48.911052 6456 handler.go:208] Removed *v1.Node event handler 7\\\\nI0127 20:03:48.911039 6456 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0127 20:03:48.911068 6456 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:48.911079 6456 handler.go:190] Sending *v1.EgressFirewall event 
handler 9 for removal\\\\nI0127 20:03:48.911090 6456 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0127 20:03:48.911096 6456 factory.go:656] Stopping watch factory\\\\nI0127 20:03:48.911104 6456 handler.go:208] Removed *v1.EgressFirewall ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\"
:\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.254593 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.264295 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc 
kubenswrapper[4793]: I0127 20:03:53.275180 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dafc1d4f-e681-4d03-b3cf-b5db3d4fad74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba4d1738f6b64ad1e3c6338180b578ab559e038763880acb005b80ec2b0bde38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49f17253bed9de06fefacec37381e23f24486f77508298e3db45218a50c2a407\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d37df3fad0cbffb0d04fc9ad6dd2f37186870c68993262c43a150bd38f31242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\
\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.287222 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.297729 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.312560 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.325763 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.334731 4793 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.334808 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.334857 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.334886 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.334903 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:53Z","lastTransitionTime":"2026-01-27T20:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.339563 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.361113 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountP
ath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-
01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.374678 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.391110 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:53Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.436877 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.436961 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.436995 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.437012 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.437022 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:53Z","lastTransitionTime":"2026-01-27T20:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.538996 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.539055 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.539071 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.539093 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.539108 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:53Z","lastTransitionTime":"2026-01-27T20:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.641534 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.641629 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.641645 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.641666 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.641678 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:53Z","lastTransitionTime":"2026-01-27T20:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.744945 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.745277 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.745408 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.745529 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.745655 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:53Z","lastTransitionTime":"2026-01-27T20:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.803023 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.803139 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:53 crc kubenswrapper[4793]: E0127 20:03:53.803835 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:03:53 crc kubenswrapper[4793]: E0127 20:03:53.804053 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.848538 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.848593 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.848604 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.848623 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.848647 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:53Z","lastTransitionTime":"2026-01-27T20:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.951807 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.951863 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.951885 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.951913 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:53 crc kubenswrapper[4793]: I0127 20:03:53.951930 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:53Z","lastTransitionTime":"2026-01-27T20:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.056201 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.056375 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.056412 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.056456 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.056482 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:54Z","lastTransitionTime":"2026-01-27T20:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.159038 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.159113 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.159125 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.159143 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.159158 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:54Z","lastTransitionTime":"2026-01-27T20:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.197025 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 22:07:12.947091512 +0000 UTC Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.262009 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.262088 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.262113 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.262144 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.262169 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:54Z","lastTransitionTime":"2026-01-27T20:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.364618 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.364644 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.364652 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.364665 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.364676 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:54Z","lastTransitionTime":"2026-01-27T20:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.467404 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.467500 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.467520 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.467542 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.467602 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:54Z","lastTransitionTime":"2026-01-27T20:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.570783 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.570920 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.570939 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.570964 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.570982 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:54Z","lastTransitionTime":"2026-01-27T20:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.674279 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.674322 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.674349 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.674369 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.674383 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:54Z","lastTransitionTime":"2026-01-27T20:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.777513 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.777580 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.777593 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.777609 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.777620 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:54Z","lastTransitionTime":"2026-01-27T20:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.803128 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:54 crc kubenswrapper[4793]: E0127 20:03:54.803288 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.803199 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:54 crc kubenswrapper[4793]: E0127 20:03:54.803384 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.883394 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.883442 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.883452 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.883467 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.883475 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:54Z","lastTransitionTime":"2026-01-27T20:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.985292 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.985345 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.985363 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.985385 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:54 crc kubenswrapper[4793]: I0127 20:03:54.985401 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:54Z","lastTransitionTime":"2026-01-27T20:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.087791 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.087858 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.087870 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.087888 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.087900 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:55Z","lastTransitionTime":"2026-01-27T20:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.190289 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.190340 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.190353 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.190368 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.190380 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:55Z","lastTransitionTime":"2026-01-27T20:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.197988 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 15:01:47.437720709 +0000 UTC Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.293272 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.293320 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.293329 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.293345 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.293356 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:55Z","lastTransitionTime":"2026-01-27T20:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.396379 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.396434 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.396446 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.396464 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.396476 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:55Z","lastTransitionTime":"2026-01-27T20:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.499722 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.499808 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.499832 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.499859 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.499879 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:55Z","lastTransitionTime":"2026-01-27T20:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.602065 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.602114 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.602124 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.602137 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.602145 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:55Z","lastTransitionTime":"2026-01-27T20:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.704299 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.704351 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.704363 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.704380 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.704393 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:55Z","lastTransitionTime":"2026-01-27T20:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.802890 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.802979 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:55 crc kubenswrapper[4793]: E0127 20:03:55.803047 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:03:55 crc kubenswrapper[4793]: E0127 20:03:55.803193 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.811019 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.811047 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.811060 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.811073 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.811103 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:55Z","lastTransitionTime":"2026-01-27T20:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.823329 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:55Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.836114 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:55Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.850871 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:55Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.864101 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:55Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.876457 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dafc1d4f-e681-4d03-b3cf-b5db3d4fad74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba4d1738f6b64ad1e3c6338180b578ab559e038763880acb005b80ec2b0bde38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49f17253bed9de06fefacec37381e23f24486f77508298e3db45218a50c2a407\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"
started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d37df3fad0cbffb0d04fc9ad6dd2f37186870c68993262c43a150bd38f31242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:55Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.887884 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:55Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.909087 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7
dfc568df78d15ea825fd9e20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e80c79def07c3f2c82f56fa28a75c859ab13ebce959a1d832f998f854b8d684\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"message\\\":\\\":656] Stopping watch factory\\\\nI0127 20:03:30.292708 6227 ovnkube.go:599] Stopped ovnkube\\\\nI0127 20:03:30.292737 6227 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0127 20:03:30.292789 6227 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0127 20:03:30.292939 6227 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:30Z is after 2025-08-24T17:21:41Z]\\\\nI0127 20:03:30.292954 6227 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-ingress/router-internal-default\\\\\\\"}\\\\nI0127 20:03:30.292968 6227 services_controller.go:360] Finished syncing service router-internal-default on namespace openshift-ingress for network=default : 2.452521ms\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:29Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:48Z\\\",\\\"message\\\":\\\"all/v1/apis/informers/externalversions/factory.go:140\\\\nI0127 20:03:48.910920 6456 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0127 20:03:48.910963 6456 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:48.910970 6456 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:48.910984 6456 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:48.910991 6456 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:48.910996 6456 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:48.911052 6456 handler.go:208] Removed *v1.Node event handler 7\\\\nI0127 20:03:48.911039 6456 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0127 20:03:48.911068 6456 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:48.911079 6456 handler.go:190] Sending *v1.EgressFirewall event 
handler 9 for removal\\\\nI0127 20:03:48.911090 6456 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0127 20:03:48.911096 6456 factory.go:656] Stopping watch factory\\\\nI0127 20:03:48.911104 6456 handler.go:208] Removed *v1.EgressFirewall ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\"
:\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:55Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.913406 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.913457 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.913470 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.913487 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.913499 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:55Z","lastTransitionTime":"2026-01-27T20:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.923898 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:55Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.933106 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:55Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.951920 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state
\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:55Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.963480 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:55Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.975249 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:55Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.984539 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:55Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:55 crc kubenswrapper[4793]: I0127 20:03:55.996782 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:55Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.009913 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:56Z is after 2025-08-24T17:21:41Z" Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.015446 4793 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.015506 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.015517 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.015578 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.015604 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:56Z","lastTransitionTime":"2026-01-27T20:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.023630 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:56Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.035945 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:56Z is after 2025-08-24T17:21:41Z"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.047355 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:03:56Z is after 2025-08-24T17:21:41Z"
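The status-patch failures above share a single root cause: the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 is serving a certificate that expired on 2025-08-24, while the node clock reads 2026-01-27. A minimal Go probe along the following lines can confirm what the endpoint is actually serving; the address is taken from the log, and chain verification is deliberately skipped so the handshake survives long enough to read the dates. This is a diagnostic sketch, not part of kubelet.

```go
// certprobe prints the validity window of the certificate served by a local
// TLS endpoint (here, the webhook port seen in the log records above).
package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

func main() {
	// Skip chain verification on purpose: with an expired certificate the
	// handshake would otherwise fail before we could read its dates.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%s notBefore=%s notAfter=%s\n",
			cert.Subject, cert.NotBefore, cert.NotAfter)
	}
}
```

Run on the node, this should print a notAfter matching the x509 error above (2025-08-24T17:21:41Z), confirming the failure is the webhook's serving certificate rather than the kubelet's client configuration.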
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.118228 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.118263 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.118291 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.118306 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.118317 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:56Z","lastTransitionTime":"2026-01-27T20:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.198490 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 13:46:15.855180578 +0000 UTC
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.220521 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.220564 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.220572 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.220601 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.220613 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:56Z","lastTransitionTime":"2026-01-27T20:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
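The certificate_manager record above shows a fixed expiration (2026-02-24 05:53:03 UTC) but a rotation deadline that changes on every retry (compare the deadlines logged in the following seconds). That is expected behavior: client-go's certificate manager re-draws a jittered deadline inside the certificate's validity window each time it checks. A rough sketch of that draw follows, assuming a uniformly random point in roughly the 70-90% span of the validity period; the exact fractions are an assumption from memory of k8s.io/client-go/util/certificate, not taken from this log.

```go
// Sketch of the jitter behind the shifting "rotation deadline" values above.
// Assumption: rotation is scheduled at a random instant within roughly the
// 70%-90% span of the certificate's validity, recomputed on each pass, which
// is why the logged deadline differs from line to line.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	// Illustrative window only; the real issuance time is not in this log.
	notAfter, _ := time.Parse("2006-01-02 15:04:05", "2026-02-24 05:53:03")
	notBefore := notAfter.AddDate(0, -6, 0)
	for i := 0; i < 3; i++ {
		fmt.Println(rotationDeadline(notBefore, notAfter)) // different each call
	}
}
```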
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.323385 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.323426 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.323437 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.323453 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.323465 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:56Z","lastTransitionTime":"2026-01-27T20:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.425717 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.425745 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.425755 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.425769 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.425778 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:56Z","lastTransitionTime":"2026-01-27T20:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.528229 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.528270 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.528282 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.528299 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.528312 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:56Z","lastTransitionTime":"2026-01-27T20:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
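Each heartbeat cycle above records four node events and then writes the Ready=False condition; the condition={...} payload is an ordinary Kubernetes NodeCondition rendered as JSON. A self-contained decoding sketch follows; the struct mirrors only the fields visible in the log, and string timestamps keep it dependency-free.

```go
// Decode one of the condition={...} payloads from the setters.go records.
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

type NodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Copied verbatim from one of the log records above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:56Z","lastTransitionTime":"2026-01-27T20:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`

	var c NodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s=%s reason=%s\n", c.Type, c.Status, c.Reason)
}
```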
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.629849 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.629914 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.629928 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.629943 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.629955 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:56Z","lastTransitionTime":"2026-01-27T20:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.731774 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.731832 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.731841 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.731854 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.731863 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:56Z","lastTransitionTime":"2026-01-27T20:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.802570 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.802704 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:03:56 crc kubenswrapper[4793]: E0127 20:03:56.802838 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
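The repeated KubeletNotReady message names the concrete blocker: there is no CNI configuration file in /etc/kubernetes/cni/net.d/, which is also why the "No sandbox for pod can be found" pods above cannot be started. The check reduces to looking for configuration files in that directory, roughly as sketched below; the accepted extensions follow CNI convention (.conf, .conflist, .json) and are an assumption rather than kubelet's exact logic.

```go
// Report whether the CNI conf directory named in the log contains any
// configuration files. Illustrative triage sketch only.
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log
	entries, err := os.ReadDir(confDir)
	if err != nil {
		log.Fatalf("reading %s: %v", confDir, err)
	}
	found := 0
	for _, e := range entries {
		// Assumed extension set, per CNI convention.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("CNI config:", filepath.Join(confDir, e.Name()))
			found++
		}
	}
	if found == 0 {
		fmt.Println("no CNI configuration file found; network plugin not ready")
	}
}
```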
Jan 27 20:03:56 crc kubenswrapper[4793]: E0127 20:03:56.803000 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.834636 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.834687 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.834697 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.834714 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.834724 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:56Z","lastTransitionTime":"2026-01-27T20:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.937401 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.937451 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.937464 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.937481 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:56 crc kubenswrapper[4793]: I0127 20:03:56.937493 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:56Z","lastTransitionTime":"2026-01-27T20:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.039968 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.040016 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.040027 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.040043 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.040055 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:57Z","lastTransitionTime":"2026-01-27T20:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.142601 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.142658 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.142673 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.142691 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.142704 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:57Z","lastTransitionTime":"2026-01-27T20:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.199590 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 01:37:00.654884876 +0000 UTC Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.245729 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.245794 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.245810 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.245827 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.245839 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:57Z","lastTransitionTime":"2026-01-27T20:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.348788 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.348857 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.348868 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.348884 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.348896 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:57Z","lastTransitionTime":"2026-01-27T20:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.452312 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.452384 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.452405 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.452431 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.452451 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:57Z","lastTransitionTime":"2026-01-27T20:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.554902 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.554950 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.554964 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.554982 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.554997 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:57Z","lastTransitionTime":"2026-01-27T20:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.657654 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.657696 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.657713 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.657730 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.657740 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:57Z","lastTransitionTime":"2026-01-27T20:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.759523 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.759577 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.759588 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.759600 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.759607 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:57Z","lastTransitionTime":"2026-01-27T20:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.803013 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.803013 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9"
Jan 27 20:03:57 crc kubenswrapper[4793]: E0127 20:03:57.803179 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 27 20:03:57 crc kubenswrapper[4793]: E0127 20:03:57.803293 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e"
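At this point the same four pods keep failing to sync, once per resync, all for the same NetworkPluginNotReady reason. When triaging a log like this, tallying "Error syncing pod" records per pod quickly shows that it is one root cause rather than many. A throwaway sketch that reads a kubelet log on stdin follows; the pod="..." field layout is taken from the records above.

```go
// Tally "Error syncing pod" records per pod in a kubelet log on stdin.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"strings"
)

func main() {
	podRe := regexp.MustCompile(`pod="([^"]+)"`)
	counts := map[string]int{}

	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // records can be very long
	for sc.Scan() {
		line := sc.Text()
		if !strings.Contains(line, "Error syncing pod") {
			continue
		}
		if m := podRe.FindStringSubmatch(line); m != nil {
			counts[m[1]]++
		}
	}
	for pod, n := range counts {
		fmt.Printf("%6d %s\n", n, pod)
	}
}
```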
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.861385 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.861430 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.861442 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.861458 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.861471 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:57Z","lastTransitionTime":"2026-01-27T20:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.964130 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.964174 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.964185 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.964198 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:03:57 crc kubenswrapper[4793]: I0127 20:03:57.964207 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:57Z","lastTransitionTime":"2026-01-27T20:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.066885 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.066919 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.066927 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.066941 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.066950 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:58Z","lastTransitionTime":"2026-01-27T20:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.169433 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.169473 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.169481 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.169495 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.169504 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:58Z","lastTransitionTime":"2026-01-27T20:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.200101 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 01:47:01.598799227 +0000 UTC Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.271436 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.271484 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.271494 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.271506 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.271514 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:58Z","lastTransitionTime":"2026-01-27T20:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.374793 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.374842 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.374854 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.374868 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.374882 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:58Z","lastTransitionTime":"2026-01-27T20:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.478041 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.478102 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.478125 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.478156 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.478174 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:58Z","lastTransitionTime":"2026-01-27T20:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.581053 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.581101 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.581113 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.581127 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.581137 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:58Z","lastTransitionTime":"2026-01-27T20:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.683775 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.683814 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.683822 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.683835 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.683845 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:58Z","lastTransitionTime":"2026-01-27T20:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.786299 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.786361 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.786376 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.786398 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.786418 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:58Z","lastTransitionTime":"2026-01-27T20:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.803160 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.803193 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:03:58 crc kubenswrapper[4793]: E0127 20:03:58.803343 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:03:58 crc kubenswrapper[4793]: E0127 20:03:58.803462 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.889900 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.889949 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.889961 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.889997 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.890009 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:58Z","lastTransitionTime":"2026-01-27T20:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.992941 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.992989 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.993000 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.993018 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:58 crc kubenswrapper[4793]: I0127 20:03:58.993029 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:58Z","lastTransitionTime":"2026-01-27T20:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.095766 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.095806 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.095817 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.095842 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.095855 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:59Z","lastTransitionTime":"2026-01-27T20:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.199990 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.200048 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.200061 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.200081 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.200091 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:59Z","lastTransitionTime":"2026-01-27T20:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.200309 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 22:40:51.011716401 +0000 UTC Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.302187 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.302485 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.302590 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.302695 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.302786 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:59Z","lastTransitionTime":"2026-01-27T20:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.405848 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.405906 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.405918 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.405942 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.405957 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:59Z","lastTransitionTime":"2026-01-27T20:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.508222 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.508266 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.508281 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.508300 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.508315 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:59Z","lastTransitionTime":"2026-01-27T20:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.611680 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.611718 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.611730 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.611748 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.611763 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:59Z","lastTransitionTime":"2026-01-27T20:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.714623 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.714686 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.714703 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.714728 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.714816 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:59Z","lastTransitionTime":"2026-01-27T20:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.803997 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.804021 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:03:59 crc kubenswrapper[4793]: E0127 20:03:59.804524 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:03:59 crc kubenswrapper[4793]: E0127 20:03:59.804576 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.816888 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.817094 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.817163 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.817226 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.817295 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:59Z","lastTransitionTime":"2026-01-27T20:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.919944 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.919981 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.919989 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.920002 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:03:59 crc kubenswrapper[4793]: I0127 20:03:59.920010 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:03:59Z","lastTransitionTime":"2026-01-27T20:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.022723 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.022752 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.022761 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.022772 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.022780 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:00Z","lastTransitionTime":"2026-01-27T20:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.124927 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.124968 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.124980 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.124997 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.125008 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:00Z","lastTransitionTime":"2026-01-27T20:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.200398 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 16:34:07.923556026 +0000 UTC Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.227063 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.227388 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.227646 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.227858 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.228046 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:00Z","lastTransitionTime":"2026-01-27T20:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.330631 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.330660 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.330669 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.330683 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.330695 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:00Z","lastTransitionTime":"2026-01-27T20:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.432436 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.432471 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.432481 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.432496 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.432505 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:00Z","lastTransitionTime":"2026-01-27T20:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.534840 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.534882 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.534911 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.534933 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.534944 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:00Z","lastTransitionTime":"2026-01-27T20:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.637191 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.637239 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.637250 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.637265 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.637275 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:00Z","lastTransitionTime":"2026-01-27T20:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.739479 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.739525 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.739535 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.739567 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.739577 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:00Z","lastTransitionTime":"2026-01-27T20:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.803068 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.803110 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:00 crc kubenswrapper[4793]: E0127 20:04:00.803202 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:00 crc kubenswrapper[4793]: E0127 20:04:00.803263 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.841844 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.841873 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.841881 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.841893 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.841902 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:00Z","lastTransitionTime":"2026-01-27T20:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.944563 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.944602 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.944613 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.944627 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:00 crc kubenswrapper[4793]: I0127 20:04:00.944636 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:00Z","lastTransitionTime":"2026-01-27T20:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.047194 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.047228 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.047237 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.047249 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.047258 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:01Z","lastTransitionTime":"2026-01-27T20:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.149838 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.149877 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.149887 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.149900 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.149911 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:01Z","lastTransitionTime":"2026-01-27T20:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.201056 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 04:50:44.525167659 +0000 UTC Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.251833 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.251878 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.251889 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.251906 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.251916 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:01Z","lastTransitionTime":"2026-01-27T20:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.354410 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.354487 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.354505 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.354529 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.354570 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:01Z","lastTransitionTime":"2026-01-27T20:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.456255 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.456288 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.456298 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.456310 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.456319 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:01Z","lastTransitionTime":"2026-01-27T20:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.558958 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.559010 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.559024 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.559042 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.559061 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:01Z","lastTransitionTime":"2026-01-27T20:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.661168 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.661204 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.661213 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.661225 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.661234 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:01Z","lastTransitionTime":"2026-01-27T20:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.763526 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.763592 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.763607 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.763623 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.763634 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:01Z","lastTransitionTime":"2026-01-27T20:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.802958 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:01 crc kubenswrapper[4793]: E0127 20:04:01.803078 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.803665 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.803805 4793 scope.go:117] "RemoveContainer" containerID="926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20" Jan 27 20:04:01 crc kubenswrapper[4793]: E0127 20:04:01.803818 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:01 crc kubenswrapper[4793]: E0127 20:04:01.803942 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-8glmz_openshift-ovn-kubernetes(4fb300f8-bf40-4c4e-a3e5-4d5149177aae)\"" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.816089 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dafc1d4f-e681-4d03-b3cf-b5db3d4fad74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba4d1738f6b64ad1e3c6338180b578ab559e038763880acb005b80ec2b0bde38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49f17253bed9de06fefacec37381e23f24486f77508298e3db45218a50c2a407\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d37df3fad0cbffb0d04fc9ad6dd2f37186870c68993262c43a150bd38f31242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/o
cp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:01Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.828347 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:01Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.845512 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7
dfc568df78d15ea825fd9e20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:48Z\\\",\\\"message\\\":\\\"all/v1/apis/informers/externalversions/factory.go:140\\\\nI0127 20:03:48.910920 6456 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0127 20:03:48.910963 6456 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:48.910970 6456 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:48.910984 6456 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:48.910991 6456 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:48.910996 6456 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:48.911052 6456 handler.go:208] Removed *v1.Node event handler 7\\\\nI0127 20:03:48.911039 6456 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0127 20:03:48.911068 6456 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:48.911079 6456 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:48.911090 6456 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0127 20:03:48.911096 6456 factory.go:656] Stopping watch factory\\\\nI0127 20:03:48.911104 6456 handler.go:208] Removed *v1.EgressFirewall ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-8glmz_openshift-ovn-kubernetes(4fb300f8-bf40-4c4e-a3e5-4d5149177aae)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:01Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.859729 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-
cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:01Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.866659 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.866728 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.866753 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.866782 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.866803 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:01Z","lastTransitionTime":"2026-01-27T20:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.872049 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:01Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.894928 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571a
a215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:01Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.913358 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de7
78458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:01Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.933446 4793 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:01Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.952543 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:01Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.969360 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.969399 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.969411 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.969427 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.969437 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:01Z","lastTransitionTime":"2026-01-27T20:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.980699 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:01Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:01 crc kubenswrapper[4793]: I0127 20:04:01.991989 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:01Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.006381 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:02Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.018258 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:02Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.027821 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:02Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.039369 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:02Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.052774 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:02Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.064819 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:02Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.071847 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.071884 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.071895 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.071910 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.071920 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:02Z","lastTransitionTime":"2026-01-27T20:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.075193 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:02Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.175058 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.175088 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.175097 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.175109 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.175117 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:02Z","lastTransitionTime":"2026-01-27T20:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.201927 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 12:39:00.325554353 +0000 UTC Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.277141 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.277179 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.277191 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.277210 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.277221 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:02Z","lastTransitionTime":"2026-01-27T20:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.379728 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.379818 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.379855 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.379898 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.379922 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:02Z","lastTransitionTime":"2026-01-27T20:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.482492 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.482542 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.482645 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.482666 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.482677 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:02Z","lastTransitionTime":"2026-01-27T20:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.585539 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.585593 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.585604 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.585619 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.585632 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:02Z","lastTransitionTime":"2026-01-27T20:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.687757 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.687806 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.687820 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.687839 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.687852 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:02Z","lastTransitionTime":"2026-01-27T20:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.790060 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.790106 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.790117 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.790134 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.790146 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:02Z","lastTransitionTime":"2026-01-27T20:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.802330 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.802387 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:02 crc kubenswrapper[4793]: E0127 20:04:02.802443 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:02 crc kubenswrapper[4793]: E0127 20:04:02.802512 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.892989 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.893041 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.893053 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.893069 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.893081 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:02Z","lastTransitionTime":"2026-01-27T20:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.996519 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.996612 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.996633 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.996662 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:02 crc kubenswrapper[4793]: I0127 20:04:02.996680 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:02Z","lastTransitionTime":"2026-01-27T20:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.129568 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.129617 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.129629 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.129647 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.129658 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:03Z","lastTransitionTime":"2026-01-27T20:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.202809 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 01:28:32.350725086 +0000 UTC Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.234368 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.234418 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.234430 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.234450 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.234461 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:03Z","lastTransitionTime":"2026-01-27T20:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.336998 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.337053 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.337070 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.337091 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.337108 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:03Z","lastTransitionTime":"2026-01-27T20:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.430544 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs\") pod \"network-metrics-daemon-gsrf9\" (UID: \"93412db5-52e2-4b3a-aee4-3c43f090750e\") " pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:03 crc kubenswrapper[4793]: E0127 20:04:03.430791 4793 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 20:04:03 crc kubenswrapper[4793]: E0127 20:04:03.430869 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs podName:93412db5-52e2-4b3a-aee4-3c43f090750e nodeName:}" failed. No retries permitted until 2026-01-27 20:04:35.430846123 +0000 UTC m=+100.821099319 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs") pod "network-metrics-daemon-gsrf9" (UID: "93412db5-52e2-4b3a-aee4-3c43f090750e") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.439907 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.439975 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.439998 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.440026 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.440047 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:03Z","lastTransitionTime":"2026-01-27T20:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.542922 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.542982 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.542994 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.543011 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.543024 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:03Z","lastTransitionTime":"2026-01-27T20:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.576626 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.576666 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.576676 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.576692 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.576704 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:03Z","lastTransitionTime":"2026-01-27T20:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:03 crc kubenswrapper[4793]: E0127 20:04:03.591928 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:03Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.595844 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.595879 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.595892 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.595911 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.595922 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:03Z","lastTransitionTime":"2026-01-27T20:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:03 crc kubenswrapper[4793]: E0127 20:04:03.611061 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:03Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.615233 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.615284 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.615320 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.615339 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.615415 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:03Z","lastTransitionTime":"2026-01-27T20:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:03 crc kubenswrapper[4793]: E0127 20:04:03.634024 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:03Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.638028 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.638083 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.638099 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.638117 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.638130 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:03Z","lastTransitionTime":"2026-01-27T20:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:03 crc kubenswrapper[4793]: E0127 20:04:03.652065 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:03Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.655538 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.655590 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.655604 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.655620 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.655631 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:03Z","lastTransitionTime":"2026-01-27T20:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:03 crc kubenswrapper[4793]: E0127 20:04:03.669324 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:03Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:03 crc kubenswrapper[4793]: E0127 20:04:03.669455 4793 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.670661 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.670695 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.670706 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.670723 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.670735 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:03Z","lastTransitionTime":"2026-01-27T20:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.772998 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.773031 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.773042 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.773059 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.773071 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:03Z","lastTransitionTime":"2026-01-27T20:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.803828 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:03 crc kubenswrapper[4793]: E0127 20:04:03.804000 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.804381 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:03 crc kubenswrapper[4793]: E0127 20:04:03.804456 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.875286 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.875328 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.875340 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.875356 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.875370 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:03Z","lastTransitionTime":"2026-01-27T20:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.977390 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.977431 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.977443 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.977459 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:03 crc kubenswrapper[4793]: I0127 20:04:03.977472 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:03Z","lastTransitionTime":"2026-01-27T20:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.080394 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.080429 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.080446 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.080461 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.080472 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:04Z","lastTransitionTime":"2026-01-27T20:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.182446 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.182481 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.182491 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.182530 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.182570 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:04Z","lastTransitionTime":"2026-01-27T20:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.203803 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 04:23:02.510980009 +0000 UTC Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.285098 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.285141 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.285153 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.285168 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.285180 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:04Z","lastTransitionTime":"2026-01-27T20:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.387229 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.387267 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.387277 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.387292 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.387303 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:04Z","lastTransitionTime":"2026-01-27T20:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.490492 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.490530 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.490556 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.490573 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.490583 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:04Z","lastTransitionTime":"2026-01-27T20:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.592578 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.592607 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.592615 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.592626 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.592636 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:04Z","lastTransitionTime":"2026-01-27T20:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.697387 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.697443 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.697461 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.697476 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.697488 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:04Z","lastTransitionTime":"2026-01-27T20:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.800381 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.800433 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.800447 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.800463 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.800476 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:04Z","lastTransitionTime":"2026-01-27T20:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.802383 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.802414 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:04 crc kubenswrapper[4793]: E0127 20:04:04.802698 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:04 crc kubenswrapper[4793]: E0127 20:04:04.802717 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.903388 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.903426 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.903436 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.903452 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:04 crc kubenswrapper[4793]: I0127 20:04:04.903466 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:04Z","lastTransitionTime":"2026-01-27T20:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.006187 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.006236 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.006249 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.006266 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.006278 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:05Z","lastTransitionTime":"2026-01-27T20:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.108021 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.108070 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.108082 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.108097 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.108108 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:05Z","lastTransitionTime":"2026-01-27T20:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.204934 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 16:39:06.684897956 +0000 UTC Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.211834 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.211898 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.211918 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.211945 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.211967 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:05Z","lastTransitionTime":"2026-01-27T20:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.314698 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.314734 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.314744 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.314759 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.314771 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:05Z","lastTransitionTime":"2026-01-27T20:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.416885 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.416955 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.416976 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.417003 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.417020 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:05Z","lastTransitionTime":"2026-01-27T20:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.519878 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.519920 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.519930 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.519944 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.519954 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:05Z","lastTransitionTime":"2026-01-27T20:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.622223 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.622268 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.622284 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.622301 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.622312 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:05Z","lastTransitionTime":"2026-01-27T20:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.724434 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.724468 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.724477 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.724496 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.724508 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:05Z","lastTransitionTime":"2026-01-27T20:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.802256 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.802328 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:05 crc kubenswrapper[4793]: E0127 20:04:05.802440 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:05 crc kubenswrapper[4793]: E0127 20:04:05.802487 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.817184 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:05Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.826441 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:05Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.827125 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.827147 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.827172 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.827186 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.827195 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:05Z","lastTransitionTime":"2026-01-27T20:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.838432 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:05Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.848067 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:05Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.864396 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:05Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.883723 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571a
a215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:05Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.898005 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de7
78458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:05Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.909208 4793 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:05Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.918620 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:05Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.930094 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:05Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.930334 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.930358 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.930368 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.930381 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.930391 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:05Z","lastTransitionTime":"2026-01-27T20:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.939907 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:05Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.951336 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:05Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.963592 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:05Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.976913 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:05Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:05 crc kubenswrapper[4793]: I0127 20:04:05.997273 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:48Z\\\",\\\"message\\\":\\\"all/v1/apis/informers/externalversions/factory.go:140\\\\nI0127 20:03:48.910920 6456 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0127 20:03:48.910963 6456 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:48.910970 6456 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:48.910984 6456 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:48.910991 6456 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:48.910996 6456 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:48.911052 6456 handler.go:208] Removed *v1.Node event handler 7\\\\nI0127 20:03:48.911039 6456 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0127 20:03:48.911068 6456 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:48.911079 6456 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:48.911090 6456 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0127 20:03:48.911096 6456 factory.go:656] Stopping watch factory\\\\nI0127 20:03:48.911104 6456 handler.go:208] Removed *v1.EgressFirewall ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-8glmz_openshift-ovn-kubernetes(4fb300f8-bf40-4c4e-a3e5-4d5149177aae)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:05Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.011529 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-
cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:06Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.022565 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:06Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.033206 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dafc1d4f-e681-4d03-b3cf-b5db3d4fad74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba4d1738f6b64ad1e3c6338180b578ab559e038763880acb005b80ec2b0bde38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49f17253bed9de06fefacec37381e23f24486f77508298e3db45218a50c2a407\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d37df3fad0cbffb0d04fc9ad6dd2f37186870c68993262c43a150bd38f31242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:06Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.033516 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.033530 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.033601 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.033622 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.033631 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:06Z","lastTransitionTime":"2026-01-27T20:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.135536 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.135687 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.135709 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.135737 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.135757 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:06Z","lastTransitionTime":"2026-01-27T20:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.205439 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 02:12:52.87556664 +0000 UTC Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.238778 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.238894 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.238924 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.238961 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.238996 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:06Z","lastTransitionTime":"2026-01-27T20:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.341981 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.342023 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.342035 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.342052 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.342064 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:06Z","lastTransitionTime":"2026-01-27T20:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.444032 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.444362 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.444509 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.444660 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.444800 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:06Z","lastTransitionTime":"2026-01-27T20:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.546953 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.546981 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.546989 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.547001 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.547011 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:06Z","lastTransitionTime":"2026-01-27T20:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.649795 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.649828 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.649838 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.649854 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.649865 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:06Z","lastTransitionTime":"2026-01-27T20:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.752178 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.752215 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.752225 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.752242 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.752254 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:06Z","lastTransitionTime":"2026-01-27T20:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.803037 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:06 crc kubenswrapper[4793]: E0127 20:04:06.803199 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.803450 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:06 crc kubenswrapper[4793]: E0127 20:04:06.803588 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.854260 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.854317 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.854333 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.854359 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.854375 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:06Z","lastTransitionTime":"2026-01-27T20:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.957261 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.957299 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.957311 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.957325 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:06 crc kubenswrapper[4793]: I0127 20:04:06.957335 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:06Z","lastTransitionTime":"2026-01-27T20:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.060021 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.060658 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.060689 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.060713 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.060725 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:07Z","lastTransitionTime":"2026-01-27T20:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.163266 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.163310 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.163321 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.163339 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.163348 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:07Z","lastTransitionTime":"2026-01-27T20:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.205881 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 06:11:36.068665445 +0000 UTC Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.265147 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.265187 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.265199 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.265215 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.265226 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:07Z","lastTransitionTime":"2026-01-27T20:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.367023 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.367451 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.367612 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.367725 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.367818 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:07Z","lastTransitionTime":"2026-01-27T20:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.470571 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.470609 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.470626 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.470644 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.470658 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:07Z","lastTransitionTime":"2026-01-27T20:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.573305 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.573353 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.573364 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.573393 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.573404 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:07Z","lastTransitionTime":"2026-01-27T20:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.675087 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.675350 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.675421 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.675491 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.675624 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:07Z","lastTransitionTime":"2026-01-27T20:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.778146 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.778390 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.778459 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.778574 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.778677 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:07Z","lastTransitionTime":"2026-01-27T20:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.802338 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.802418 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9"
Jan 27 20:04:07 crc kubenswrapper[4793]: E0127 20:04:07.802761 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 27 20:04:07 crc kubenswrapper[4793]: E0127 20:04:07.802789 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e"
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.880947 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.880975 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.880986 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.881002 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.881015 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:07Z","lastTransitionTime":"2026-01-27T20:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.982621 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.982654 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.982663 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.982677 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:07 crc kubenswrapper[4793]: I0127 20:04:07.982685 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:07Z","lastTransitionTime":"2026-01-27T20:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.085283 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.085341 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.085355 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.085374 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.085387 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:08Z","lastTransitionTime":"2026-01-27T20:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.188210 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.188238 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.188247 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.188258 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.188267 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:08Z","lastTransitionTime":"2026-01-27T20:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.206892 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 13:09:11.997535407 +0000 UTC
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.291194 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.291286 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.291312 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.291344 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.291363 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:08Z","lastTransitionTime":"2026-01-27T20:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.391830 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7k9v7_d3e7b749-a397-4db6-8b6e-ddde6b3fdced/kube-multus/0.log"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.391875 4793 generic.go:334] "Generic (PLEG): container finished" podID="d3e7b749-a397-4db6-8b6e-ddde6b3fdced" containerID="d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b" exitCode=1
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.391900 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-7k9v7" event={"ID":"d3e7b749-a397-4db6-8b6e-ddde6b3fdced","Type":"ContainerDied","Data":"d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b"}
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.392296 4793 scope.go:117] "RemoveContainer" containerID="d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.393992 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.394013 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.394025 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.394039 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.394053 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:08Z","lastTransitionTime":"2026-01-27T20:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.405805 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.421270 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.434525 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.448640 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.461296 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.473323 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.485823 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dafc1d4f-e681-4d03-b3cf-b5db3d4fad74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba4d1738f6b64ad1e3c6338180b578ab559e038763880acb005b80ec2b0bde38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49f17253bed9de06fefacec37381e23f24486f77508298e3db45218a50c2a407\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"
started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d37df3fad0cbffb0d04fc9ad6dd2f37186870c68993262c43a150bd38f31242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.497026 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.497104 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.497123 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.497131 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.497144 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.497152 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:08Z","lastTransitionTime":"2026-01-27T20:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.515464 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:48Z\\\",\\\"message\\\":\\\"all/v1/apis/informers/externalversions/factory.go:140\\\\nI0127 20:03:48.910920 6456 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0127 20:03:48.910963 6456 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:48.910970 6456 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:48.910984 6456 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:48.910991 6456 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:48.910996 6456 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:48.911052 6456 handler.go:208] Removed *v1.Node event handler 7\\\\nI0127 20:03:48.911039 6456 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0127 20:03:48.911068 6456 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:48.911079 6456 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:48.911090 6456 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0127 20:03:48.911096 6456 factory.go:656] Stopping watch factory\\\\nI0127 20:03:48.911104 6456 handler.go:208] Removed *v1.EgressFirewall ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-8glmz_openshift-ovn-kubernetes(4fb300f8-bf40-4c4e-a3e5-4d5149177aae)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.529248 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:04:08Z\\\",\\\"message\\\":\\\"2026-01-27T20:03:22+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_bcbef9de-a5b7-4bc6-866c-277440b968dd\\\\n2026-01-27T20:03:22+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_bcbef9de-a5b7-4bc6-866c-277440b968dd to /host/opt/cni/bin/\\\\n2026-01-27T20:03:23Z [verbose] multus-daemon started\\\\n2026-01-27T20:03:23Z [verbose] Readiness Indicator file check\\\\n2026-01-27T20:04:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.541621 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.557928 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329
a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\
\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.575893 4793 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]
},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cr
i-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.587717 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.599540 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.599794 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.599818 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.599829 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.599842 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.599852 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:08Z","lastTransitionTime":"2026-01-27T20:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.609533 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.621332 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.632807 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:08Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.701822 4793 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.701851 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.701860 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.701874 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.701884 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:08Z","lastTransitionTime":"2026-01-27T20:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.803054 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.803065 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:08 crc kubenswrapper[4793]: E0127 20:04:08.803200 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:08 crc kubenswrapper[4793]: E0127 20:04:08.803314 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.805071 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.805101 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.805113 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.805127 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:08 crc kubenswrapper[4793]: I0127 20:04:08.805138 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:08Z","lastTransitionTime":"2026-01-27T20:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:08.907214 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:08.907245 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:08.907253 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:08.907269 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:08.907279 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:08Z","lastTransitionTime":"2026-01-27T20:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.009525 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.009576 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.009585 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.009600 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.009610 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:09Z","lastTransitionTime":"2026-01-27T20:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.111943 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.111992 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.112001 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.112014 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.112023 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:09Z","lastTransitionTime":"2026-01-27T20:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.207265 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 01:00:24.770876952 +0000 UTC Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.213694 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.213729 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.213740 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.213755 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.213766 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:09Z","lastTransitionTime":"2026-01-27T20:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.316058 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.316091 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.316101 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.316114 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.316122 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:09Z","lastTransitionTime":"2026-01-27T20:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.396360 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7k9v7_d3e7b749-a397-4db6-8b6e-ddde6b3fdced/kube-multus/0.log" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.406713 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-7k9v7" event={"ID":"d3e7b749-a397-4db6-8b6e-ddde6b3fdced","Type":"ContainerStarted","Data":"5523d5c8be12ef4b6f5d0b544383354aecb339148cf8dff40faff81570a033dd"} Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.418147 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.418185 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.418196 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.418212 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.418223 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:09Z","lastTransitionTime":"2026-01-27T20:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.420696 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.439192 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.451409 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.464635 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.478971 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.490574 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.505638 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5523d5c8be12ef4b6f5d0b544383354aecb339148cf8dff40faff81570a033dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:04:08Z\\\",\\\"message\\\":\\\"2026-01-27T20:03:22+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_bcbef9de-a5b7-4bc6-866c-277440b968dd\\\\n2026-01-27T20:03:22+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_bcbef9de-a5b7-4bc6-866c-277440b968dd to /host/opt/cni/bin/\\\\n2026-01-27T20:03:23Z [verbose] multus-daemon started\\\\n2026-01-27T20:03:23Z [verbose] Readiness Indicator file check\\\\n2026-01-27T20:04:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:04:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.516688 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.526162 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.526316 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.526391 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.526470 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.526563 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:09Z","lastTransitionTime":"2026-01-27T20:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.530163 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dafc1d4f-e681-4d03-b3cf-b5db3d4fad74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba4d1738f6b64ad1e3c6338180b578ab559e038763880acb005b80ec2b0bde38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49f17253bed9de06fefacec37381e23f24486f77508298e3db45218a50c2a407\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d37df3fad0cbffb0d04fc9ad6dd2f37186870c68993262c43a150bd38f31242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.541062 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.561303 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21729
07e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:48Z\\\",\\\"message\\\":\\\"all/v1/apis/informers/externalversions/factory.go:140\\\\nI0127 20:03:48.910920 6456 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0127 20:03:48.910963 6456 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:48.910970 6456 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:48.910984 6456 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:48.910991 6456 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:48.910996 6456 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:48.911052 6456 handler.go:208] Removed *v1.Node event handler 7\\\\nI0127 20:03:48.911039 6456 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0127 20:03:48.911068 6456 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:48.911079 6456 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:48.911090 6456 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0127 20:03:48.911096 6456 factory.go:656] Stopping watch factory\\\\nI0127 20:03:48.911104 6456 handler.go:208] Removed *v1.EgressFirewall 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-8glmz_openshift-ovn-kubernetes(4fb300f8-bf40-4c4e-a3e5-4d5149177aae)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.572435 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.583632 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.599027 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.619825 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"
running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c
9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.628517 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.628742 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.628855 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.629002 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.629205 4793 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:09Z","lastTransitionTime":"2026-01-27T20:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.633668 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\
":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.645832 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.655472 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:09Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.732368 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.732748 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.732881 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.733024 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.733164 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:09Z","lastTransitionTime":"2026-01-27T20:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.803218 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:09 crc kubenswrapper[4793]: E0127 20:04:09.803388 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.803715 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:09 crc kubenswrapper[4793]: E0127 20:04:09.803816 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.835172 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.835595 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.835702 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.835811 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.835904 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:09Z","lastTransitionTime":"2026-01-27T20:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.938248 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.938293 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.938303 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.938319 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:09 crc kubenswrapper[4793]: I0127 20:04:09.938332 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:09Z","lastTransitionTime":"2026-01-27T20:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.040275 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.040317 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.040326 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.040341 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.040351 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:10Z","lastTransitionTime":"2026-01-27T20:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.143212 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.143249 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.143258 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.143274 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.143284 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:10Z","lastTransitionTime":"2026-01-27T20:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.207498 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 00:24:34.536755678 +0000 UTC Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.246020 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.246060 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.246075 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.246094 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.246109 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:10Z","lastTransitionTime":"2026-01-27T20:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.349000 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.349035 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.349047 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.349063 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.349074 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:10Z","lastTransitionTime":"2026-01-27T20:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.452194 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.452235 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.452252 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.452272 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.452289 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:10Z","lastTransitionTime":"2026-01-27T20:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.554808 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.554843 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.554859 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.554873 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.554883 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:10Z","lastTransitionTime":"2026-01-27T20:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.657507 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.657579 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.657604 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.657624 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.657638 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:10Z","lastTransitionTime":"2026-01-27T20:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.760777 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.760880 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.760907 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.760939 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.760961 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:10Z","lastTransitionTime":"2026-01-27T20:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.803153 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.803206 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:10 crc kubenswrapper[4793]: E0127 20:04:10.803331 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:10 crc kubenswrapper[4793]: E0127 20:04:10.803418 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.863632 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.863670 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.863680 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.863695 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.863706 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:10Z","lastTransitionTime":"2026-01-27T20:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.965973 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.966072 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.966097 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.966125 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:10 crc kubenswrapper[4793]: I0127 20:04:10.966144 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:10Z","lastTransitionTime":"2026-01-27T20:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.068692 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.068964 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.069027 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.069092 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.069147 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:11Z","lastTransitionTime":"2026-01-27T20:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.171843 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.171881 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.171890 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.171902 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.171911 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:11Z","lastTransitionTime":"2026-01-27T20:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.208678 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 22:35:23.251462784 +0000 UTC Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.274338 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.275344 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.275626 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.276065 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.276282 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:11Z","lastTransitionTime":"2026-01-27T20:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.379107 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.379385 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.379489 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.379615 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.379720 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:11Z","lastTransitionTime":"2026-01-27T20:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.482844 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.483163 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.483187 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.483208 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.483220 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:11Z","lastTransitionTime":"2026-01-27T20:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.586068 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.586112 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.586123 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.586141 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.586152 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:11Z","lastTransitionTime":"2026-01-27T20:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.689024 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.689065 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.689081 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.689100 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.689114 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:11Z","lastTransitionTime":"2026-01-27T20:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.790835 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.790869 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.790877 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.790890 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.790899 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:11Z","lastTransitionTime":"2026-01-27T20:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.803809 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9"
Jan 27 20:04:11 crc kubenswrapper[4793]: E0127 20:04:11.804005 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.804061 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 20:04:11 crc kubenswrapper[4793]: E0127 20:04:11.804107 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.892505 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.892582 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.892598 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.892618 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.892634 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:11Z","lastTransitionTime":"2026-01-27T20:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.995640 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.995690 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.995709 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.995726 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:11 crc kubenswrapper[4793]: I0127 20:04:11.995738 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:11Z","lastTransitionTime":"2026-01-27T20:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.097984 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.098301 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.098422 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.098603 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.098745 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:12Z","lastTransitionTime":"2026-01-27T20:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.201103 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.201147 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.201160 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.201177 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.201192 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:12Z","lastTransitionTime":"2026-01-27T20:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.209598 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 05:18:25.780647455 +0000 UTC
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.304375 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.304437 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.304455 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.304478 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.304494 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:12Z","lastTransitionTime":"2026-01-27T20:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.406437 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.406504 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.406526 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.406607 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.406641 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:12Z","lastTransitionTime":"2026-01-27T20:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.509576 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.509640 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.509664 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.509691 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.509711 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:12Z","lastTransitionTime":"2026-01-27T20:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.611480 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.611775 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.611875 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.611978 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.612072 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:12Z","lastTransitionTime":"2026-01-27T20:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.715326 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.715356 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.715364 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.715377 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.715386 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:12Z","lastTransitionTime":"2026-01-27T20:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.803208 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.803217 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:04:12 crc kubenswrapper[4793]: E0127 20:04:12.803436 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 27 20:04:12 crc kubenswrapper[4793]: E0127 20:04:12.803675 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.804366 4793 scope.go:117] "RemoveContainer" containerID="926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.817252 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.817302 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.817320 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.817345 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.817362 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:12Z","lastTransitionTime":"2026-01-27T20:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.921704 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.921747 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.921760 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.921779 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:12 crc kubenswrapper[4793]: I0127 20:04:12.921789 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:12Z","lastTransitionTime":"2026-01-27T20:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.024302 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.024343 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.024351 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.024365 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.024374 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:13Z","lastTransitionTime":"2026-01-27T20:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.129083 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.129166 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.129186 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.129212 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.129230 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:13Z","lastTransitionTime":"2026-01-27T20:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.210147 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 02:23:00.376097467 +0000 UTC
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.232137 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.232287 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.232308 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.232329 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.232343 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:13Z","lastTransitionTime":"2026-01-27T20:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.334492 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.334524 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.334531 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.334566 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.334577 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:13Z","lastTransitionTime":"2026-01-27T20:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.425003 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovnkube-controller/2.log"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.429223 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerStarted","Data":"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5"}
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.429766 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.437690 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.437747 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.437764 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.437787 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.437803 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:13Z","lastTransitionTime":"2026-01-27T20:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.445241 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.467526 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.483327 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.500958 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.519521 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1baaa5af575d6720b7b27b1309d09e59ba502d73
2300ce24a54ba9e77c239bc5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:48Z\\\",\\\"message\\\":\\\"all/v1/apis/informers/externalversions/factory.go:140\\\\nI0127 20:03:48.910920 6456 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0127 20:03:48.910963 6456 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:48.910970 6456 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:48.910984 6456 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:48.910991 6456 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:48.910996 6456 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:48.911052 6456 handler.go:208] Removed *v1.Node event handler 7\\\\nI0127 20:03:48.911039 6456 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0127 20:03:48.911068 6456 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:48.911079 6456 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:48.911090 6456 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0127 20:03:48.911096 6456 factory.go:656] Stopping watch factory\\\\nI0127 20:03:48.911104 6456 handler.go:208] Removed *v1.EgressFirewall 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:04:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.532402 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5523d5c8be12ef4b6f5d0b544383354aecb339148cf8dff40faff81570a033dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:04:08Z\\\",\\\"message\\\":\\\"2026-01-27T20:03:22+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to 
/host/opt/cni/bin/upgrade_bcbef9de-a5b7-4bc6-866c-277440b968dd\\\\n2026-01-27T20:03:22+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_bcbef9de-a5b7-4bc6-866c-277440b968dd to /host/opt/cni/bin/\\\\n2026-01-27T20:03:23Z [verbose] multus-daemon started\\\\n2026-01-27T20:03:23Z [verbose] Readiness Indicator file check\\\\n2026-01-27T20:04:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:04:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.539798 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.539826 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.539834 4793 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.539848 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.539858 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:13Z","lastTransitionTime":"2026-01-27T20:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.548784 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.566069 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dafc1d4f-e681-4d03-b3cf-b5db3d4fad74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba4d1738f6b64ad1e3c6338180b578ab559e038763880acb005b80ec2b0bde38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49f17253bed9de06fefacec37381e23f24486f77508298e3db45218a50c2a407\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d37df3fad0cbffb0d04fc9ad6dd2f37186870c68993262c43a150bd38f31242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.581936 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 
20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.592839 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.611304 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.630142 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.642463 4793 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.642492 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.642501 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.642514 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.642522 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:13Z","lastTransitionTime":"2026-01-27T20:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.644326 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.668104 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountP
ath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-
01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.680274 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.695619 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.708559 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.718197 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.744971 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.745004 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.745012 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.745027 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.745036 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:13Z","lastTransitionTime":"2026-01-27T20:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.796472 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.796516 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.796528 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.796560 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.796572 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:13Z","lastTransitionTime":"2026-01-27T20:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.804399 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.804420 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:13 crc kubenswrapper[4793]: E0127 20:04:13.804659 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:13 crc kubenswrapper[4793]: E0127 20:04:13.804790 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:13 crc kubenswrapper[4793]: E0127 20:04:13.811529 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.818081 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.818111 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.818120 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.818137 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.818148 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:13Z","lastTransitionTime":"2026-01-27T20:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:13 crc kubenswrapper[4793]: E0127 20:04:13.832652 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.836909 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.836970 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
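Every failed PATCH in this stretch has the same root cause: the kubelet cannot complete the TLS handshake with the network-node-identity webhook on 127.0.0.1:9743 because the host clock (2026-01-27) is past the serving certificate's NotAfter date (2025-08-24). The following is a minimal Go sketch of the validity-window check that yields "certificate has expired or is not yet valid"; the certificate path is an assumption (the pod spec above mounts the webhook cert under /etc/webhook-cert/, but the file name is hypothetical).

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path; the webhook's serving cert is mounted from the
	// "webhook-cert" volume at /etc/webhook-cert/ per the pod spec above.
	data, err := os.ReadFile("/etc/webhook-cert/tls.crt")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	now := time.Now()
	// The same window check the TLS verifier performs: a clock outside
	// [NotBefore, NotAfter] produces the x509 error seen in the log.
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Printf("invalid: current time %s is outside [%s, %s]\n",
			now.Format(time.RFC3339),
			cert.NotBefore.Format(time.RFC3339),
			cert.NotAfter.Format(time.RFC3339))
		return
	}
	fmt.Println("certificate is within its validity window")
}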
event="NodeHasNoDiskPressure" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.836985 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.837004 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.837019 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:13Z","lastTransitionTime":"2026-01-27T20:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:13 crc kubenswrapper[4793]: E0127 20:04:13.849566 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.852835 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.852878 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
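The payload quoted in each err string is a strategic merge patch: the "$setElementOrder/conditions" directive records the full order of the conditions list (keyed by type), while the "conditions" array carries only the entries whose fields changed. A sketch of how such a patch is produced with the apimachinery strategic-patch helper, which is the mechanism behind these node-status PATCH bodies; the two Node statuses below are invented minimal examples, not taken from this log.

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	// Previous status: node Ready. New status: Ready flipped to False.
	original := []byte(`{"status":{"conditions":[{"type":"MemoryPressure","status":"False"},{"type":"Ready","status":"True"}]}}`)
	modified := []byte(`{"status":{"conditions":[{"type":"MemoryPressure","status":"False"},{"type":"Ready","status":"False","reason":"KubeletNotReady"}]}}`)

	// Diff the two objects using corev1.Node's struct tags (conditions are
	// merged on the "type" key), yielding a strategic merge patch.
	patch, err := strategicpatch.CreateTwoWayMergePatch(original, modified, corev1.Node{})
	if err != nil {
		panic(err)
	}
	var pretty map[string]interface{}
	_ = json.Unmarshal(patch, &pretty)
	out, _ := json.MarshalIndent(pretty, "", "  ")
	// The printed patch should carry a "$setElementOrder/conditions" entry
	// plus only the changed Ready condition -- the same shape as the
	// payloads logged above.
	fmt.Println(string(out))
}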
event="NodeHasNoDiskPressure" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.852889 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.852903 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.852913 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:13Z","lastTransitionTime":"2026-01-27T20:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:13 crc kubenswrapper[4793]: E0127 20:04:13.865299 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.870006 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.870062 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
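The Ready=False condition itself comes from the runtime network check, not from the webhook: CRI-O keeps reporting NetworkReady=false while /etc/kubernetes/cni/net.d/ contains no CNI configuration, and the kubelet copies that message verbatim into the node's Ready condition. Below is a rough standalone approximation of the config discovery, assuming the libcni convention of loading *.conf, *.conflist and *.json files from the configured directory; the real check runs inside the container runtime, not as a separate tool.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Directory named in the log message; OVN-Kubernetes is expected to
	// write its CNI config here once it comes up.
	confDir := "/etc/kubernetes/cni/net.d"
	var found []string
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(confDir, pat))
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		found = append(found, matches...)
	}
	if len(found) == 0 {
		// An empty result is what surfaces as NetworkPluginNotReady.
		fmt.Printf("no CNI configuration file in %s. Has your network provider started?\n", confDir)
		os.Exit(1)
	}
	for _, f := range found {
		fmt.Println("found CNI config:", f)
	}
}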
event="NodeHasNoDiskPressure" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.870077 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.870097 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.870109 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:13Z","lastTransitionTime":"2026-01-27T20:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:13 crc kubenswrapper[4793]: E0127 20:04:13.888374 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:13 crc kubenswrapper[4793]: E0127 20:04:13.888509 4793 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.890257 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.890298 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.890309 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.890327 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.890341 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:13Z","lastTransitionTime":"2026-01-27T20:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.995569 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.995637 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.995655 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.995680 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:13 crc kubenswrapper[4793]: I0127 20:04:13.995699 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:13Z","lastTransitionTime":"2026-01-27T20:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.098931 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.098964 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.098972 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.098985 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.098996 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:14Z","lastTransitionTime":"2026-01-27T20:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.200808 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.200853 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.200865 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.200881 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.200906 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:14Z","lastTransitionTime":"2026-01-27T20:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.210484 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 10:45:13.031637365 +0000 UTC Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.303235 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.303287 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.303298 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.303316 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.303328 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:14Z","lastTransitionTime":"2026-01-27T20:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.406497 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.406592 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.406606 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.406630 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.406643 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:14Z","lastTransitionTime":"2026-01-27T20:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.510776 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.510858 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.510891 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.510924 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.510945 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:14Z","lastTransitionTime":"2026-01-27T20:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.614612 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.614655 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.614670 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.614688 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.614703 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:14Z","lastTransitionTime":"2026-01-27T20:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.717525 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.717585 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.717604 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.717621 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.717630 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:14Z","lastTransitionTime":"2026-01-27T20:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.803029 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.803129 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:14 crc kubenswrapper[4793]: E0127 20:04:14.803386 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:14 crc kubenswrapper[4793]: E0127 20:04:14.803525 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.819502 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.819563 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.819577 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.819596 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.819607 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:14Z","lastTransitionTime":"2026-01-27T20:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.923236 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.923298 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.923316 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.923340 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:14 crc kubenswrapper[4793]: I0127 20:04:14.923357 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:14Z","lastTransitionTime":"2026-01-27T20:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.025907 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.025955 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.025968 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.025985 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.025996 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:15Z","lastTransitionTime":"2026-01-27T20:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.128659 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.128702 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.128715 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.128758 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.128772 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:15Z","lastTransitionTime":"2026-01-27T20:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.211361 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 22:23:58.88651388 +0000 UTC Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.231828 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.231900 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.231922 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.231951 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.231973 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:15Z","lastTransitionTime":"2026-01-27T20:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.334482 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.334522 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.334531 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.334570 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.334584 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:15Z","lastTransitionTime":"2026-01-27T20:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.437188 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.437283 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.437297 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.437316 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.437328 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:15Z","lastTransitionTime":"2026-01-27T20:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.438629 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovnkube-controller/3.log" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.439607 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovnkube-controller/2.log" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.444698 4793 generic.go:334] "Generic (PLEG): container finished" podID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerID="1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5" exitCode=1 Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.444765 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerDied","Data":"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5"} Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.444968 4793 scope.go:117] "RemoveContainer" containerID="926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.446486 4793 scope.go:117] "RemoveContainer" containerID="1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5" Jan 27 20:04:15 crc kubenswrapper[4793]: E0127 20:04:15.447282 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-8glmz_openshift-ovn-kubernetes(4fb300f8-bf40-4c4e-a3e5-4d5149177aae)\"" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.464560 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.480897 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.495637 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.508222 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.519588 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.535191 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.540207 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.540245 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.540258 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.540273 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.540285 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:15Z","lastTransitionTime":"2026-01-27T20:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.545035 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.562352 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:48Z\\\",\\\"message\\\":\\\"all/v1/apis/informers/externalversions/factory.go:140\\\\nI0127 20:03:48.910920 6456 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0127 20:03:48.910963 6456 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:48.910970 6456 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:48.910984 6456 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:48.910991 6456 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:48.910996 6456 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:48.911052 6456 handler.go:208] Removed *v1.Node event handler 7\\\\nI0127 20:03:48.911039 6456 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0127 20:03:48.911068 6456 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:48.911079 6456 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:48.911090 6456 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0127 20:03:48.911096 6456 factory.go:656] Stopping watch factory\\\\nI0127 20:03:48.911104 6456 handler.go:208] Removed *v1.EgressFirewall ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:04:14Z\\\",\\\"message\\\":\\\".4.40\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0127 20:04:14.031345 6854 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z]\\\\nI0127 20:04:14.031190 6854 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI0127 20:04:14.031359 6854 obj_retry.go:303] Retry objec\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:04:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuberne
tes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.576744 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5523d5c8be12ef4b6f5d0b544383354aecb339148cf8dff40faff81570a033dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:04:08Z\\\",\\\"message\\\":\\\"2026-01-27T20:03:22+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_bcbef9de-a5b7-4bc6-866c-277440b968dd\\\\n2026-01-27T20:03:22+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_bcbef9de-a5b7-4bc6-866c-277440b968dd to /host/opt/cni/bin/\\\\n2026-01-27T20:03:23Z [verbose] multus-daemon started\\\\n2026-01-27T20:03:23Z [verbose] Readiness Indicator file check\\\\n2026-01-27T20:04:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:04:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.588443 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.598967 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dafc1d4f-e681-4d03-b3cf-b5db3d4fad74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba4d1738f6b64ad1e3c6338180b578ab559e038763880acb005b80ec2b0bde38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49f17253bed9de06fefacec37381e23f24486f77508298e3db45218a50c2a407\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d37df3fad0cbffb0d04fc9ad6dd2f37186870c68993262c43a150bd38f31242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.610828 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operato
r@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.630439 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.639283 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.642463 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.642507 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.642520 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.642537 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.642567 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:15Z","lastTransitionTime":"2026-01-27T20:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.649798 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.660714 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.674453 4793 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.693076 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571a
a215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.744463 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.744510 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.744522 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.744538 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.744568 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:15Z","lastTransitionTime":"2026-01-27T20:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.803078 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.803139 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:15 crc kubenswrapper[4793]: E0127 20:04:15.803215 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:15 crc kubenswrapper[4793]: E0127 20:04:15.803394 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.817269 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dafc1d4f-e681-4d03-b3cf-b5db3d4fad74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba4d1738f6b64ad1e3c6338180b578ab559e038763880acb005b80ec2b0bde38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49f17253bed9de06fefacec37381e23f24486f77508298e3db45218a50c2a407\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d37df3fad0cbffb0d04fc9ad6dd2f37186870c68993262c43a150bd38f31242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.830269 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.847761 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.847793 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.847802 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.847815 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.847825 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:15Z","lastTransitionTime":"2026-01-27T20:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.848104 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:48Z\\\",\\\"message\\\":\\\"all/v1/apis/informers/externalversions/factory.go:140\\\\nI0127 20:03:48.910920 6456 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0127 20:03:48.910963 6456 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:48.910970 6456 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:48.910984 6456 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:48.910991 6456 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:48.910996 6456 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:48.911052 6456 handler.go:208] Removed *v1.Node event handler 7\\\\nI0127 20:03:48.911039 6456 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0127 20:03:48.911068 6456 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:48.911079 6456 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:48.911090 6456 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0127 20:03:48.911096 6456 factory.go:656] Stopping watch factory\\\\nI0127 20:03:48.911104 6456 handler.go:208] Removed *v1.EgressFirewall 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:04:14Z\\\",\\\"message\\\":\\\".4.40\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0127 20:04:14.031345 6854 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z]\\\\nI0127 20:04:14.031190 6854 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI0127 20:04:14.031359 6854 obj_retry.go:303] Retry 
objec\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:04:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1
d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.865206 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5523d5c8be12ef4b6f5d0b544383354aecb339148cf8dff40faff81570a033dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:04:08Z\\\",\\\"message\\\":\\\"2026-01-27T20:03:22+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_bcbef9de-a5b7-4bc6-866c-277440b968dd\\\\n2026-01-27T20:03:22+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_bcbef9de-a5b7-4bc6-866c-277440b968dd to /host/opt/cni/bin/\\\\n2026-01-27T20:03:23Z [verbose] 
multus-daemon started\\\\n2026-01-27T20:03:23Z [verbose] Readiness Indicator file check\\\\n2026-01-27T20:04:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:04:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.876102 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.895386 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571a
a215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.907588 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de7
78458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.919482 4793 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.929330 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.941331 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.949487 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.949517 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.949525 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.949539 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.949562 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:15Z","lastTransitionTime":"2026-01-27T20:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.951664 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.965322 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 
2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.976667 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:15 crc kubenswrapper[4793]: I0127 20:04:15.987258 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.000872 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:15Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.016897 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:16Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.029299 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:16Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.040887 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:16Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.051187 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.051222 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.051238 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.051253 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.051264 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:16Z","lastTransitionTime":"2026-01-27T20:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.153858 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.154142 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.154218 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.154310 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.154377 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:16Z","lastTransitionTime":"2026-01-27T20:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.212051 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 12:48:12.511902168 +0000 UTC Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.256912 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.257353 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.257520 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.257784 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.257949 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:16Z","lastTransitionTime":"2026-01-27T20:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.360849 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.360921 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.360934 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.360954 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.360967 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:16Z","lastTransitionTime":"2026-01-27T20:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.450836 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovnkube-controller/3.log" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.463687 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.463724 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.463732 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.463746 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.463756 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:16Z","lastTransitionTime":"2026-01-27T20:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.566070 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.566116 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.566129 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.566145 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.566159 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:16Z","lastTransitionTime":"2026-01-27T20:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.668408 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.668452 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.668466 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.668482 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.668494 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:16Z","lastTransitionTime":"2026-01-27T20:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.770455 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.770500 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.770508 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.770524 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.770534 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:16Z","lastTransitionTime":"2026-01-27T20:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.802417 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.802467 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:16 crc kubenswrapper[4793]: E0127 20:04:16.802619 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:16 crc kubenswrapper[4793]: E0127 20:04:16.802698 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.873015 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.873060 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.873071 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.873087 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.873098 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:16Z","lastTransitionTime":"2026-01-27T20:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.975650 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.975713 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.975731 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.975777 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:16 crc kubenswrapper[4793]: I0127 20:04:16.975794 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:16Z","lastTransitionTime":"2026-01-27T20:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.078501 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.078579 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.078592 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.078610 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.078625 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:17Z","lastTransitionTime":"2026-01-27T20:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.182227 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.182306 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.182328 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.182358 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.182381 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:17Z","lastTransitionTime":"2026-01-27T20:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.213223 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 09:50:15.026312721 +0000 UTC Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.285292 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.285406 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.285432 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.285466 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.285490 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:17Z","lastTransitionTime":"2026-01-27T20:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.388939 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.389010 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.389038 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.389223 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.389245 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:17Z","lastTransitionTime":"2026-01-27T20:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.491190 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.491227 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.491237 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.491252 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.491263 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:17Z","lastTransitionTime":"2026-01-27T20:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.594980 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.595062 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.595088 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.595119 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.595140 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:17Z","lastTransitionTime":"2026-01-27T20:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.697695 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.697773 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.697787 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.697831 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.697844 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:17Z","lastTransitionTime":"2026-01-27T20:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.800340 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.800384 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.800395 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.800413 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.800426 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:17Z","lastTransitionTime":"2026-01-27T20:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.802259 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.802345 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:17 crc kubenswrapper[4793]: E0127 20:04:17.802374 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:17 crc kubenswrapper[4793]: E0127 20:04:17.802507 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.903452 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.903494 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.903506 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.903522 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:17 crc kubenswrapper[4793]: I0127 20:04:17.903533 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:17Z","lastTransitionTime":"2026-01-27T20:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.006424 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.006462 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.006474 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.006488 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.006499 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:18Z","lastTransitionTime":"2026-01-27T20:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.109441 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.109502 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.109603 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.109696 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.109732 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:18Z","lastTransitionTime":"2026-01-27T20:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.212054 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.212096 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.212108 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.212125 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.212135 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:18Z","lastTransitionTime":"2026-01-27T20:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.213408 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 04:38:08.010705706 +0000 UTC Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.314998 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.315055 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.315080 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.315107 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.315128 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:18Z","lastTransitionTime":"2026-01-27T20:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.418494 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.418529 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.418541 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.418594 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.418606 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:18Z","lastTransitionTime":"2026-01-27T20:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.521966 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.522013 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.522025 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.522085 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.522163 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:18Z","lastTransitionTime":"2026-01-27T20:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.625572 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.625636 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.625656 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.625670 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.625679 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:18Z","lastTransitionTime":"2026-01-27T20:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.728792 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.728877 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.728905 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.728932 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.728949 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:18Z","lastTransitionTime":"2026-01-27T20:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.802971 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.803028 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:18 crc kubenswrapper[4793]: E0127 20:04:18.803076 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:18 crc kubenswrapper[4793]: E0127 20:04:18.803133 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.831141 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.831191 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.831201 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.831223 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.831285 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:18Z","lastTransitionTime":"2026-01-27T20:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.933416 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.933473 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.933481 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.933495 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:18 crc kubenswrapper[4793]: I0127 20:04:18.933506 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:18Z","lastTransitionTime":"2026-01-27T20:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.036867 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.036908 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.036919 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.036938 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.037143 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:19Z","lastTransitionTime":"2026-01-27T20:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.139660 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.139698 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.139708 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.139722 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.139732 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:19Z","lastTransitionTime":"2026-01-27T20:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.213745 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 17:51:38.383172242 +0000 UTC Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.242517 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.242580 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.242596 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.242613 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.242624 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:19Z","lastTransitionTime":"2026-01-27T20:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.345406 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.345445 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.345454 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.345469 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.345479 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:19Z","lastTransitionTime":"2026-01-27T20:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.447735 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.447826 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.447839 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.447854 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.447865 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:19Z","lastTransitionTime":"2026-01-27T20:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.549949 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.549992 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.550002 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.550022 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.550034 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:19Z","lastTransitionTime":"2026-01-27T20:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.652133 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.652172 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.652183 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.652197 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.652208 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:19Z","lastTransitionTime":"2026-01-27T20:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.754283 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.754325 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.754334 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.754348 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.754362 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:19Z","lastTransitionTime":"2026-01-27T20:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.803125 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.803164 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:19 crc kubenswrapper[4793]: E0127 20:04:19.803247 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:19 crc kubenswrapper[4793]: E0127 20:04:19.803424 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.857233 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.857275 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.857284 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.857299 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.857308 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:19Z","lastTransitionTime":"2026-01-27T20:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.959754 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.959809 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.959821 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.959838 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:19 crc kubenswrapper[4793]: I0127 20:04:19.959852 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:19Z","lastTransitionTime":"2026-01-27T20:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.062382 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.062442 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.062455 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.062471 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.062482 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:20Z","lastTransitionTime":"2026-01-27T20:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.164891 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.164927 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.164939 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.164954 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.164965 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:20Z","lastTransitionTime":"2026-01-27T20:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.214230 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 17:57:46.357615551 +0000 UTC Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.267729 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.267763 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.267770 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.267784 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.267793 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:20Z","lastTransitionTime":"2026-01-27T20:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.370332 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.370365 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.370375 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.370387 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.370396 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:20Z","lastTransitionTime":"2026-01-27T20:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.474108 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.474185 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.474197 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.474213 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.474225 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:20Z","lastTransitionTime":"2026-01-27T20:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.577453 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.577504 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.577516 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.577534 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.577579 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:20Z","lastTransitionTime":"2026-01-27T20:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.680365 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.680402 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.680410 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.680423 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.680435 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:20Z","lastTransitionTime":"2026-01-27T20:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.711082 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:04:20 crc kubenswrapper[4793]: E0127 20:04:20.711243 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.711215246 +0000 UTC m=+150.101468402 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.711532 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.711596 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.711625 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.711661 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 20:04:20 crc kubenswrapper[4793]: E0127 20:04:20.711668 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 27 20:04:20 crc kubenswrapper[4793]: E0127 20:04:20.711715 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 27 20:04:20 crc kubenswrapper[4793]: E0127 20:04:20.711730 4793 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 27 20:04:20 crc kubenswrapper[4793]: E0127 20:04:20.711738 4793 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 27 20:04:20 crc kubenswrapper[4793]: E0127 20:04:20.711742 4793 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 27 20:04:20 crc kubenswrapper[4793]: E0127 20:04:20.711749 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 27 20:04:20 crc kubenswrapper[4793]: E0127 20:04:20.711842 4793 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 27 20:04:20 crc kubenswrapper[4793]: E0127 20:04:20.711790 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.711772427 +0000 UTC m=+150.102025633 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 27 20:04:20 crc kubenswrapper[4793]: E0127 20:04:20.711857 4793 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 27 20:04:20 crc kubenswrapper[4793]: E0127 20:04:20.711885 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.71187273 +0000 UTC m=+150.102125886 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 27 20:04:20 crc kubenswrapper[4793]: E0127 20:04:20.711900 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.711892761 +0000 UTC m=+150.102145917 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 27 20:04:20 crc kubenswrapper[4793]: E0127 20:04:20.711909 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.711904651 +0000 UTC m=+150.102157797 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.783913 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.783973 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.783989 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.784012 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.784028 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:20Z","lastTransitionTime":"2026-01-27T20:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.802166 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.802235 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:04:20 crc kubenswrapper[4793]: E0127 20:04:20.802365 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
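The UnmountVolume.TearDown failure above is the kubelet looking up "kubevirt.io.hostpath-provisioner" in its in-memory list of registered CSI drivers before it will contact the plugin; right after a node restart that list is empty until each driver re-registers over the kubelet's plugin-registration socket, so the operation fails and is queued for retry (the "durationBeforeRetry 1m4s" in the log). A minimal Go sketch of that lookup pattern, with hypothetical names rather than the kubelet's actual types:

package main

import (
	"fmt"
	"sync"
)

// driverRegistry mimics the kubelet-side list of registered CSI drivers.
// Entries appear only after a driver's node plugin registers itself, so
// after a reboot unmounts race against driver re-registration.
type driverRegistry struct {
	mu      sync.RWMutex
	drivers map[string]struct{}
}

func (r *driverRegistry) tearDown(driverName, volumeID string) error {
	r.mu.RLock()
	defer r.mu.RUnlock()
	if _, ok := r.drivers[driverName]; !ok {
		// Mirrors the log line: the operation fails now and is retried
		// later with backoff rather than blocking the reconciler.
		return fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName)
	}
	fmt.Printf("tearing down %s via %s\n", volumeID, driverName)
	return nil
}

func main() {
	reg := &driverRegistry{drivers: map[string]struct{}{}}
	if err := reg.tearDown("kubevirt.io.hostpath-provisioner", "pvc-657094db"); err != nil {
		fmt.Println("UnmountVolume.TearDown failed:", err)
	}
}

The neighboring "not registered" configmap/secret errors look like the same early-startup pattern from a different cache: the kubelet's watch-based object store has not yet synced the pods' configmaps and secrets, so projected-volume sources cannot be resolved and the mounts are retried on the same backoff.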
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:20 crc kubenswrapper[4793]: E0127 20:04:20.802434 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.886697 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.886762 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.886778 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.886801 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.886820 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:20Z","lastTransitionTime":"2026-01-27T20:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.989976 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.990093 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.990132 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.990161 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:20 crc kubenswrapper[4793]: I0127 20:04:20.990184 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:20Z","lastTransitionTime":"2026-01-27T20:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.092916 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.092981 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.092992 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.093008 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.093019 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:21Z","lastTransitionTime":"2026-01-27T20:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.196056 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.196092 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.196104 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.196121 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.196133 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:21Z","lastTransitionTime":"2026-01-27T20:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.214931 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 04:23:19.678329911 +0000 UTC Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.299201 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.299243 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.299272 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.299293 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.299310 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:21Z","lastTransitionTime":"2026-01-27T20:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.401916 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.402000 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.402071 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.402106 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.402132 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:21Z","lastTransitionTime":"2026-01-27T20:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.504242 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.504277 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.504286 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.504300 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.504309 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:21Z","lastTransitionTime":"2026-01-27T20:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.606147 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.606189 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.606200 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.606216 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.606229 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:21Z","lastTransitionTime":"2026-01-27T20:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.708406 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.708438 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.708448 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.708464 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.708474 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:21Z","lastTransitionTime":"2026-01-27T20:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.802678 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.802693 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:21 crc kubenswrapper[4793]: E0127 20:04:21.802815 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:21 crc kubenswrapper[4793]: E0127 20:04:21.802946 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.809911 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.809948 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.809957 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.809973 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.809984 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:21Z","lastTransitionTime":"2026-01-27T20:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
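The certificate_manager lines threaded through the heartbeats report a different rotation deadline on each pass (2025-11-19, then 2026-01-02, then 2025-12-02) for the same 2026-02-24 expiry. That is consistent with client-go's jittered rotation deadline, which is re-sampled from roughly the 70%-90% window of the certificate's lifetime so that a fleet of kubelets does not stampede the CA at the same instant. A compact Go sketch of that calculation (the issue date below is an assumption; simplified from the upstream behavior):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextRotationDeadline picks a random instant between 70% and 90% of the
// certificate's total lifetime. Re-sampling on each sync pass is why the
// log prints a new deadline for an unchanged expiration time.
func nextRotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jitter := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jitter)
}

func main() {
	notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z")
	notBefore := notAfter.Add(-365 * 24 * time.Hour) // assumed issue date
	for i := 0; i < 3; i++ {
		fmt.Println("rotation deadline is", nextRotationDeadline(notBefore, notAfter))
	}
}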
Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.912606 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.912646 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.912656 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.912670 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:21 crc kubenswrapper[4793]: I0127 20:04:21.912679 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:21Z","lastTransitionTime":"2026-01-27T20:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.015651 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.015708 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.015723 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.015746 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.015762 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:22Z","lastTransitionTime":"2026-01-27T20:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.119274 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.119329 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.119375 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.119396 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.119410 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:22Z","lastTransitionTime":"2026-01-27T20:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.215291 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 08:15:43.461144718 +0000 UTC
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.222752 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.222810 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.222820 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.222861 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.222874 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:22Z","lastTransitionTime":"2026-01-27T20:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.325279 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.325329 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.325343 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.325364 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.325378 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:22Z","lastTransitionTime":"2026-01-27T20:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.429306 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.429419 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.429440 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.429465 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.429484 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:22Z","lastTransitionTime":"2026-01-27T20:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.532424 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.532498 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.532510 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.532525 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.532536 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:22Z","lastTransitionTime":"2026-01-27T20:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.635636 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.635683 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.635709 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.635729 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.635740 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:22Z","lastTransitionTime":"2026-01-27T20:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.737641 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.737671 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.737680 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.737694 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.737705 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:22Z","lastTransitionTime":"2026-01-27T20:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.802690 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:04:22 crc kubenswrapper[4793]: E0127 20:04:22.802814 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.802957 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 20:04:22 crc kubenswrapper[4793]: E0127 20:04:22.803146 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
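The recurring "Error syncing pod, skipping" pairs are the pod workers short-circuiting: a pod that needs the cluster network is not given a sandbox while the runtime reports NetworkReady=false, and the sync is simply retried on the next loop pass. A rough Go sketch of that gate, with stand-in types rather than kubelet source:

package main

import (
	"errors"
	"fmt"
)

// runtimeStatus is a stand-in for the CRI runtime status the kubelet polls.
type runtimeStatus struct{ networkReady bool }

var errNetworkNotReady = errors.New(
	"network is not ready: container runtime network not ready: NetworkReady=false")

// syncPod refuses to create a sandbox for pods that need the pod network
// until the CNI plugin has written its config under /etc/kubernetes/cni/net.d/;
// host-network pods (such as static control-plane pods) bypass this check.
func syncPod(status runtimeStatus, pod string, hostNetwork bool) error {
	if !hostNetwork && !status.networkReady {
		return errNetworkNotReady
	}
	fmt.Println("starting sandbox for", pod)
	return nil
}

func main() {
	st := runtimeStatus{networkReady: false}
	if err := syncPod(st, "openshift-network-diagnostics/network-check-target-xd92c", false); err != nil {
		fmt.Println("Error syncing pod, skipping:", err)
	}
}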
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.839927 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.840007 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.840021 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.840038 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.840076 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:22Z","lastTransitionTime":"2026-01-27T20:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.942115 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.942156 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.942164 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.942178 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:22 crc kubenswrapper[4793]: I0127 20:04:22.942190 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:22Z","lastTransitionTime":"2026-01-27T20:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.045948 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.046013 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.046028 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.046050 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.046065 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:23Z","lastTransitionTime":"2026-01-27T20:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.148121 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.148159 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.148172 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.148186 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.148197 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:23Z","lastTransitionTime":"2026-01-27T20:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.216329 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 22:14:19.431125891 +0000 UTC Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.250736 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.250780 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.250791 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.250809 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.250821 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:23Z","lastTransitionTime":"2026-01-27T20:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.353909 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.353980 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.354009 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.354039 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.354057 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:23Z","lastTransitionTime":"2026-01-27T20:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.456364 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.456426 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.456441 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.456487 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.456503 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:23Z","lastTransitionTime":"2026-01-27T20:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.558950 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.558991 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.559000 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.559015 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.559024 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:23Z","lastTransitionTime":"2026-01-27T20:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.662839 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.662865 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.662873 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.662886 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.662894 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:23Z","lastTransitionTime":"2026-01-27T20:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.765843 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.765887 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.765896 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.765913 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.765922 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:23Z","lastTransitionTime":"2026-01-27T20:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.803240 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.803316 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:23 crc kubenswrapper[4793]: E0127 20:04:23.803602 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:23 crc kubenswrapper[4793]: E0127 20:04:23.803708 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.816115 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.867825 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.867860 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.867872 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.867887 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.867897 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:23Z","lastTransitionTime":"2026-01-27T20:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.912726 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.912848 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.912876 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.912907 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.912929 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:23Z","lastTransitionTime":"2026-01-27T20:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:23 crc kubenswrapper[4793]: E0127 20:04:23.927105 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:23Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.934298 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.934434 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.934534 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.934801 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:23 crc kubenswrapper[4793]: I0127 20:04:23.934863 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:23Z","lastTransitionTime":"2026-01-27T20:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:24 crc kubenswrapper[4793]: E0127 20:04:24.101818 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:23Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.107601 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.107890 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.108123 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.108303 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.108453 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:24Z","lastTransitionTime":"2026-01-27T20:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:24 crc kubenswrapper[4793]: E0127 20:04:24.120401 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.124066 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.124221 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
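
Every one of the failed patches above dies on the same TLS handshake: the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 is serving a certificate that expired at 2025-08-24T17:21:41Z, while the node clock reads 2026-01-27. A minimal Go sketch that reproduces the check from the node itself, assuming the endpoint is reachable; the address comes from the log, everything else is illustrative:

package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Dial the webhook endpoint taken from the log. InsecureSkipVerify lets
	// us complete the handshake and inspect an expired certificate instead
	// of failing the way the kubelet's verifying client does.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	now := time.Now().UTC()
	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%s notBefore=%s notAfter=%s expired=%v\n",
			cert.Subject,
			cert.NotBefore.UTC().Format(time.RFC3339),
			cert.NotAfter.UTC().Format(time.RFC3339),
			now.After(cert.NotAfter))
	}
}

The skip-verify dial is only there so the program can read NotAfter off the peer certificate; a verifying client would fail with exactly the x509 error shown in the log.
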
event="NodeHasNoDiskPressure" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.124300 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.124401 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.124491 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:24Z","lastTransitionTime":"2026-01-27T20:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:24 crc kubenswrapper[4793]: E0127 20:04:24.135724 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.139084 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.139116 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
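
Independently of the webhook failure, every setters.go:603 entry records the same NotReady cause: no CNI configuration file under /etc/kubernetes/cni/net.d/. A rough Go sketch of that readiness test; the directory comes from the log, and the accepted extensions follow common CNI conventions (.conf, .conflist, .json) as an assumption for illustration, not necessarily the kubelet's exact rule:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// hasCNIConfig reports whether dir contains at least one CNI network
// config file. The extension list is an illustrative assumption based
// on common CNI conventions.
func hasCNIConfig(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	// Path taken verbatim from the NetworkPluginNotReady message above.
	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
	fmt.Printf("cni config present: %v (err: %v)\n", ok, err)
}

Until a file like this appears (normally written by the network operator once it is running), the runtime keeps reporting NetworkReady=false and the Ready condition stays False.
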
event="NodeHasNoDiskPressure" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.139125 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.139138 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.139148 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:24Z","lastTransitionTime":"2026-01-27T20:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:24 crc kubenswrapper[4793]: E0127 20:04:24.152336 4793 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a6b56745-8427-4204-9b6c-d7dcd3910687\\\",\\\"systemUUID\\\":\\\"8036f266-8acb-467b-9132-f34114b96520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:24Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:24 crc kubenswrapper[4793]: E0127 20:04:24.152515 4793 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.153993 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
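
The E0127 entries above arrive in a burst of five before kubelet_node_status.go:572 gives up with "update node status exceeds retry count"; the kubelet then retries the whole patch sequence on its next sync rather than backing off per attempt. A sketch of that retry shape; the limit of 5 matches the five consecutive errors here and mirrors the kubelet's nodeStatusUpdateRetry constant, while the function names are illustrative:

package main

import (
	"errors"
	"fmt"
)

// nodeStatusUpdateRetry mirrors the kubelet constant of the same name;
// the value 5 matches the five consecutive errors in this log.
const nodeStatusUpdateRetry = 5

// patchNodeStatus stands in for the real API call; here it always fails
// the way the log does, to show the retry-then-give-up shape.
func patchNodeStatus() error {
	return errors.New(`Internal error occurred: failed calling webhook "node.network-node-identity.openshift.io"`)
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := patchNodeStatus(); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return fmt.Errorf("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}

Because the webhook certificate never becomes valid, each sync produces the same five-error burst followed by the same give-up line, which is why the pattern repeats through the rest of the log.
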
event="NodeHasSufficientMemory" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.154029 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.154038 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.154052 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.154061 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:24Z","lastTransitionTime":"2026-01-27T20:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.216639 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 03:18:54.157341594 +0000 UTC Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.256818 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.257081 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.257147 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.257223 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.257293 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:24Z","lastTransitionTime":"2026-01-27T20:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.360281 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.360328 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.360342 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.360355 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.360365 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:24Z","lastTransitionTime":"2026-01-27T20:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.462516 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.462588 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.462601 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.462617 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.462629 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:24Z","lastTransitionTime":"2026-01-27T20:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.565206 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.565470 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.565576 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.565675 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.565735 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:24Z","lastTransitionTime":"2026-01-27T20:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.668055 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.668112 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.668124 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.668136 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.668145 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:24Z","lastTransitionTime":"2026-01-27T20:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.771013 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.771063 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.771075 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.771093 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.771105 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:24Z","lastTransitionTime":"2026-01-27T20:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.802452 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:24 crc kubenswrapper[4793]: E0127 20:04:24.802631 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.802477 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:24 crc kubenswrapper[4793]: E0127 20:04:24.802736 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.873793 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.874086 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.874192 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.874298 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.874383 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:24Z","lastTransitionTime":"2026-01-27T20:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.976699 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.976768 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.976788 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.976816 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:24 crc kubenswrapper[4793]: I0127 20:04:24.976837 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:24Z","lastTransitionTime":"2026-01-27T20:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.079428 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.079461 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.079469 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.079482 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.079490 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:25Z","lastTransitionTime":"2026-01-27T20:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.182115 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.182153 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.182160 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.182175 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.182189 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:25Z","lastTransitionTime":"2026-01-27T20:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.217101 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 05:56:36.17777868 +0000 UTC Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.284729 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.284826 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.284838 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.284852 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.284862 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:25Z","lastTransitionTime":"2026-01-27T20:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.388503 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.388607 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.388631 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.388659 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.388680 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:25Z","lastTransitionTime":"2026-01-27T20:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.491994 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.492117 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.492141 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.492171 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.492196 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:25Z","lastTransitionTime":"2026-01-27T20:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.595134 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.595184 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.595192 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.595207 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.595216 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:25Z","lastTransitionTime":"2026-01-27T20:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.698016 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.698059 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.698071 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.698087 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.698098 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:25Z","lastTransitionTime":"2026-01-27T20:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.799823 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.799896 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.799948 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.799966 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.799980 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:25Z","lastTransitionTime":"2026-01-27T20:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.803182 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:25 crc kubenswrapper[4793]: E0127 20:04:25.803283 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.803399 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:25 crc kubenswrapper[4793]: E0127 20:04:25.803673 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.819436 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78ce0a88-78df-4582-8bbc-3129f83b0570\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a24ee27fbe59c33f05ab2bff06cba7590935b68fe0141ff25c7255090a88a434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff9a3c2dbf82b66bf0fffab5773b0810b23d9827375ccab86a54ac37cf3eca45\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1da8b85ea60e572d433d1d65f97a03787258318bbc7b72ab27c5be8934d53538\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.836874 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.852679 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.865392 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25666aca-21d3-4cae-8386-90aaaebd1a52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6553aabafaecdd1c6fa9fd7e3d698840346ee252d0dcf82deb601b2573fe124b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7a149cc9418d4b0411f89f4ec976811b909213b40d67ada553888bbc43e71dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b2d2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-52dvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.877402 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7k9v7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3e7b749-a397-4db6-8b6e-ddde6b3fdced\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:04:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5523d5c8be12ef4b6f5d0b544383354aecb339148cf8dff40faff81570a033dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:04:08Z\\\",\\\"message\\\":\\\"2026-01-27T20:03:22+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_bcbef9de-a5b7-4bc6-866c-277440b968dd\\\\n2026-01-27T20:03:22+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_bcbef9de-a5b7-4bc6-866c-277440b968dd to /host/opt/cni/bin/\\\\n2026-01-27T20:03:23Z [verbose] multus-daemon started\\\\n2026-01-27T20:03:23Z [verbose] Readiness Indicator file check\\\\n2026-01-27T20:04:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:04:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mmlxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7k9v7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.887474 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"93412db5-52e2-4b3a-aee4-3c43f090750e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lkzm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:31Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gsrf9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.897986 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dafc1d4f-e681-4d03-b3cf-b5db3d4fad74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba4d1738f6b64ad1e3c6338180b578ab559e038763880acb005b80ec2b0bde38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49f17253bed9de06fefacec37381e23f24486f77508298e3db45218a50c2a407\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d37df3fad0cbffb0d04fc9ad6dd2f37186870c68993262c43a150bd38f31242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cc576ddeabbb08775d578c92ca29c9f6574d6bbf0673f4253749ca533d29f1a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.902162 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.902196 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.902209 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.902225 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.902235 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:25Z","lastTransitionTime":"2026-01-27T20:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.907017 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a2c0e50-3e3d-40ee-9de1-56b683da2074\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08528cf770f8410a20d0d811501da40838dece3ce776acdb5d73b371e795bbb7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0753d10384c65b09f375673073051d1cf8309dd69ac51676617071f33a313a59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0753d10384c65b09f375673073051d1cf8309dd69ac51676617071f33a313a59\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.918568 4793 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3de65702aa3148ca1fa6bc20ef78d6d448ec8a1f66d764572254444ded40c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.940296 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1baaa5af575d6720b7b27b1309d09e59ba502d73
2300ce24a54ba9e77c239bc5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://926e45957a6df0fc1b014aef70454b874d60e7f7dfc568df78d15ea825fd9e20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:03:48Z\\\",\\\"message\\\":\\\"all/v1/apis/informers/externalversions/factory.go:140\\\\nI0127 20:03:48.910920 6456 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0127 20:03:48.910963 6456 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0127 20:03:48.910970 6456 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0127 20:03:48.910984 6456 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0127 20:03:48.910991 6456 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0127 20:03:48.910996 6456 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Node event handler 2\\\\nI0127 20:03:48.911022 6456 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0127 20:03:48.911052 6456 handler.go:208] Removed *v1.Node event handler 7\\\\nI0127 20:03:48.911039 6456 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0127 20:03:48.911068 6456 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0127 20:03:48.911079 6456 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0127 20:03:48.911090 6456 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0127 20:03:48.911096 6456 factory.go:656] Stopping watch factory\\\\nI0127 20:03:48.911104 6456 handler.go:208] Removed *v1.EgressFirewall ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-27T20:04:14Z\\\",\\\"message\\\":\\\".4.40\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0127 20:04:14.031345 6854 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:13Z is after 2025-08-24T17:21:41Z]\\\\nI0127 20:04:14.031190 6854 base_network_controller_pods.go:477] 
[default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI0127 20:04:14.031359 6854 obj_retry.go:303] Retry objec\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:04:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\"
:\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rpdjd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8glmz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.952622 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.966850 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb16a16f-6f5f-4462-be09-372a8b10739a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://650964d580f86cfb70e8eacdaf5dfb7e62e25b724492f1197da97ca1583ed050\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gq8gn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:25 crc kubenswrapper[4793]: I0127 20:04:25.983634 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fe475131-3b65-45aa-a877-190a8bdec86f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e160bf8ded41492ff7bad67b6f2ae2aa9686b5f0ca59c8f68edc9947fa98d6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://139c04b94279a60a30f5393c9563ba3da7dace91bb88eb31f12ba864f2676c54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c57f0f0ff61ff78a47dc2f17e7f022a99e329a81adc228faf7ebb2b8087f38cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://923e1e9b5cde87728fe1465444d03815ee7c42ac9f2026c87ddd363db8845f90\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a378a731b492c77b33c2a7deea30a0c85b52e47ebcfbaf80dd63c9945435c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82cc434f309258f29acdaf85a40b13eebd318de04884588f15abee0464972acb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f2745c6ca33f1a019ea3e4fbfc28e7e6d5e02a2218a392cb70787641a9dd8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:03:27Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kccqx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:17Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fgp7j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:25Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.004700 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.004771 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.004794 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.004822 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.004842 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:26Z","lastTransitionTime":"2026-01-27T20:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.005438 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d31e70fa-c68b-4bf3-bb47-c57ebbf15526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://889ca6eee06d2231cc6ea690e0feb8cb72b079e13353d17404ef73975e894f07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4042c3753c1e1915118bd7df7ef1ad938dfca3279b7e610f47a4e9b3c519140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b1eb8f0c51d1c0126b8a380b73e29fedd75466b0db6b0a3b50985139b47e589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e14d8c8a27a2da98131332bc2cb96b6c3571aa215cf8367cac22b7dfedc7bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf04381d970896e7c6f449d30e0ffd583071c6ed9be738a934e9065ca3dbd4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69c2a374908ddc25d634c9db661b81688c8bd9ee101a5bdf1f50dd63b90e2bf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b65f05865a007de477ca23535b154b1a30e2d7d7a9aff041dfcb57c59824784a\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-27T20:02:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad7f00ccc8307f99ab7a9bc46b720b0f98fdd98afe2db1d77d9578527236a0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.020130 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:02:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"message\\\":\\\"le observer\\\\nW0127 20:03:16.536644 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0127 20:03:16.536768 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 20:03:16.537743 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2125205372/tls.crt::/tmp/serving-cert-2125205372/tls.key\\\\\\\"\\\\nI0127 20:03:17.001216 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 20:03:17.003674 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 20:03:17.003691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 20:03:17.003712 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 20:03:17.003717 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 20:03:17.008831 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0127 20:03:17.008851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 20:03:17.008860 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 20:03:17.008864 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 20:03:17.008867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 20:03:17.008870 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0127 20:03:17.008871 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0127 20:03:17.010613 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T20:03:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:02:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T20:02:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T20:02:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:02:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.033299 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7215f9c9f836c567811b06f3b3318eacef2abad9ac6bf1b1aba73aa8a2d4aca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.044957 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mpxz5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8338625a-5d99-48c1-a7ff-d4542b624045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2394bf3ab63b6ec7997a6681638aa1a9b4b83d71a2170d02b18a1d7fd3973250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mds8v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:16Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mpxz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.056712 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e64331a3dfc145fb83a0d4a66b173a27bb55cd000c322823bd911ea2a20e1602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f3a429c9a084ee0b5a98dcb0fb9c5a0b33e4cd24aae601de59236b896a9c6a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.067230 4793 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tl72n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb7901e2-b8cb-40a2-9e7d-7c7a06c1eec6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T20:03:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb63c44c581eea2a993f5e61467365b1a788b60a449103a15c0182398e7c6ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T20:03:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8knb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T20:03:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tl72n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-27T20:04:26Z is after 2025-08-24T17:21:41Z" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.107225 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.107259 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.107266 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.107281 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.107291 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:26Z","lastTransitionTime":"2026-01-27T20:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.209836 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.209875 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.209885 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.209899 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.209916 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:26Z","lastTransitionTime":"2026-01-27T20:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.218272 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 09:52:33.792427144 +0000 UTC Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.312438 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.312486 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.312497 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.312513 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.312524 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:26Z","lastTransitionTime":"2026-01-27T20:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.414405 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.414455 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.414474 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.414495 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.414510 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:26Z","lastTransitionTime":"2026-01-27T20:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.517206 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.517253 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.517266 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.517283 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.517297 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:26Z","lastTransitionTime":"2026-01-27T20:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.619483 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.619632 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.619705 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.619732 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.619750 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:26Z","lastTransitionTime":"2026-01-27T20:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.721885 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.721946 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.721963 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.721984 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.721999 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:26Z","lastTransitionTime":"2026-01-27T20:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.803088 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:26 crc kubenswrapper[4793]: E0127 20:04:26.803254 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.803605 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:26 crc kubenswrapper[4793]: E0127 20:04:26.803715 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.824875 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.825047 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.825074 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.825107 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.825130 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:26Z","lastTransitionTime":"2026-01-27T20:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.927923 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.928588 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.928622 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.928644 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:26 crc kubenswrapper[4793]: I0127 20:04:26.928662 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:26Z","lastTransitionTime":"2026-01-27T20:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.031084 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.031166 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.031179 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.031198 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.031213 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:27Z","lastTransitionTime":"2026-01-27T20:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.133895 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.133947 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.133961 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.133978 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.133991 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:27Z","lastTransitionTime":"2026-01-27T20:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.218423 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 12:32:00.918183904 +0000 UTC Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.236535 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.236587 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.236595 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.236610 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.236618 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:27Z","lastTransitionTime":"2026-01-27T20:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.339220 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.339275 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.339290 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.339311 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.339326 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:27Z","lastTransitionTime":"2026-01-27T20:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.442203 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.442251 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.442263 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.442277 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.442288 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:27Z","lastTransitionTime":"2026-01-27T20:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.545912 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.545964 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.545977 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.545997 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.546009 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:27Z","lastTransitionTime":"2026-01-27T20:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.649131 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.649204 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.649226 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.649254 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.649277 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:27Z","lastTransitionTime":"2026-01-27T20:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.754523 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.754574 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.754584 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.754749 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.754758 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:27Z","lastTransitionTime":"2026-01-27T20:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.803093 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:27 crc kubenswrapper[4793]: E0127 20:04:27.803283 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.803349 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:27 crc kubenswrapper[4793]: E0127 20:04:27.803495 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.857586 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.857646 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.857656 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.857671 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.857680 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:27Z","lastTransitionTime":"2026-01-27T20:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.960870 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.960920 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.960930 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.960946 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:27 crc kubenswrapper[4793]: I0127 20:04:27.960957 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:27Z","lastTransitionTime":"2026-01-27T20:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.064102 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.064143 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.064153 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.064169 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.064180 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:28Z","lastTransitionTime":"2026-01-27T20:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.167050 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.167085 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.167093 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.167119 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.167130 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:28Z","lastTransitionTime":"2026-01-27T20:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.219596 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 16:08:22.120456435 +0000 UTC Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.269227 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.269269 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.269277 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.269291 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.269300 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:28Z","lastTransitionTime":"2026-01-27T20:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.371500 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.371571 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.371582 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.371597 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.371608 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:28Z","lastTransitionTime":"2026-01-27T20:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.473910 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.473946 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.473959 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.473971 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.473979 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:28Z","lastTransitionTime":"2026-01-27T20:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.576488 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.576521 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.576530 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.576543 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.576612 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:28Z","lastTransitionTime":"2026-01-27T20:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.679669 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.679707 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.679716 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.679731 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.679740 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:28Z","lastTransitionTime":"2026-01-27T20:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.782292 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.782322 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.782332 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.782346 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.782356 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:28Z","lastTransitionTime":"2026-01-27T20:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.802828 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:28 crc kubenswrapper[4793]: E0127 20:04:28.802941 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.803638 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:28 crc kubenswrapper[4793]: E0127 20:04:28.803722 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.885256 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.885297 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.885309 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.885326 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.885339 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:28Z","lastTransitionTime":"2026-01-27T20:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.987433 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.987476 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.987487 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.987505 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:28 crc kubenswrapper[4793]: I0127 20:04:28.987516 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:28Z","lastTransitionTime":"2026-01-27T20:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.089678 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.089738 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.089750 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.089767 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.089778 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:29Z","lastTransitionTime":"2026-01-27T20:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.192018 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.192051 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.192060 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.192072 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.192082 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:29Z","lastTransitionTime":"2026-01-27T20:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.220089 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 00:07:13.152509508 +0000 UTC Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.293932 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.293971 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.293983 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.294000 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.294029 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:29Z","lastTransitionTime":"2026-01-27T20:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.400456 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.400502 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.400519 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.400541 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.400577 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:29Z","lastTransitionTime":"2026-01-27T20:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.502757 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.502793 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.502801 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.502815 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.502824 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:29Z","lastTransitionTime":"2026-01-27T20:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.605213 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.605281 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.605302 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.605336 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.605371 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:29Z","lastTransitionTime":"2026-01-27T20:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.707755 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.707800 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.707809 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.707826 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.707836 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:29Z","lastTransitionTime":"2026-01-27T20:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.802944 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.802975 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:29 crc kubenswrapper[4793]: E0127 20:04:29.803893 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.803930 4793 scope.go:117] "RemoveContainer" containerID="1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5" Jan 27 20:04:29 crc kubenswrapper[4793]: E0127 20:04:29.803970 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:29 crc kubenswrapper[4793]: E0127 20:04:29.804156 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-8glmz_openshift-ovn-kubernetes(4fb300f8-bf40-4c4e-a3e5-4d5149177aae)\"" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.810223 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.810269 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.810285 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.810307 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.810323 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:29Z","lastTransitionTime":"2026-01-27T20:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.828411 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-7k9v7" podStartSLOduration=72.82838784 podStartE2EDuration="1m12.82838784s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:04:29.828266128 +0000 UTC m=+95.218519284" watchObservedRunningTime="2026-01-27 20:04:29.82838784 +0000 UTC m=+95.218640996" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.867789 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=6.867769008 podStartE2EDuration="6.867769008s" podCreationTimestamp="2026-01-27 20:04:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:04:29.867593275 +0000 UTC m=+95.257846451" watchObservedRunningTime="2026-01-27 20:04:29.867769008 +0000 UTC m=+95.258022164" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.868287 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=45.868279909 podStartE2EDuration="45.868279909s" podCreationTimestamp="2026-01-27 20:03:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:04:29.856043108 +0000 UTC m=+95.246296274" watchObservedRunningTime="2026-01-27 20:04:29.868279909 +0000 UTC m=+95.258533065" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.912288 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.912321 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.912330 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.912342 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.912352 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:29Z","lastTransitionTime":"2026-01-27T20:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.952069 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podStartSLOduration=73.952037282 podStartE2EDuration="1m13.952037282s" podCreationTimestamp="2026-01-27 20:03:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:04:29.933012897 +0000 UTC m=+95.323266053" watchObservedRunningTime="2026-01-27 20:04:29.952037282 +0000 UTC m=+95.342290448" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.996561 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-fgp7j" podStartSLOduration=72.99653023 podStartE2EDuration="1m12.99653023s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:04:29.952618244 +0000 UTC m=+95.342871420" watchObservedRunningTime="2026-01-27 20:04:29.99653023 +0000 UTC m=+95.386783386" Jan 27 20:04:29 crc kubenswrapper[4793]: I0127 20:04:29.997101 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=71.997095622 podStartE2EDuration="1m11.997095622s" podCreationTimestamp="2026-01-27 20:03:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:04:29.995474187 +0000 UTC m=+95.385727343" watchObservedRunningTime="2026-01-27 20:04:29.997095622 +0000 UTC m=+95.387348788" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.014911 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.014942 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.014950 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.014964 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.014974 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:30Z","lastTransitionTime":"2026-01-27T20:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.038680 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=73.038661107 podStartE2EDuration="1m13.038661107s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:04:30.022176376 +0000 UTC m=+95.412429522" watchObservedRunningTime="2026-01-27 20:04:30.038661107 +0000 UTC m=+95.428914263" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.066255 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-mpxz5" podStartSLOduration=75.066237604 podStartE2EDuration="1m15.066237604s" podCreationTimestamp="2026-01-27 20:03:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:04:30.050425517 +0000 UTC m=+95.440678673" watchObservedRunningTime="2026-01-27 20:04:30.066237604 +0000 UTC m=+95.456490760" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.076328 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-tl72n" podStartSLOduration=74.076311569 podStartE2EDuration="1m14.076311569s" podCreationTimestamp="2026-01-27 20:03:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:04:30.075744516 +0000 UTC m=+95.465997672" watchObservedRunningTime="2026-01-27 20:04:30.076311569 +0000 UTC m=+95.466564725" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.091632 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=74.091614334 podStartE2EDuration="1m14.091614334s" podCreationTimestamp="2026-01-27 20:03:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:04:30.091297767 +0000 UTC m=+95.481550923" watchObservedRunningTime="2026-01-27 20:04:30.091614334 +0000 UTC m=+95.481867490" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.117783 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.117834 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.117876 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.117892 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.117902 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:30Z","lastTransitionTime":"2026-01-27T20:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.129893 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-52dvp" podStartSLOduration=73.129876209 podStartE2EDuration="1m13.129876209s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:04:30.129378008 +0000 UTC m=+95.519631164" watchObservedRunningTime="2026-01-27 20:04:30.129876209 +0000 UTC m=+95.520129365" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.220245 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 09:58:18.497561038 +0000 UTC Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.220444 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.220490 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.220499 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.220513 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.220523 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:30Z","lastTransitionTime":"2026-01-27T20:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.323382 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.323442 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.323452 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.323469 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.323480 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:30Z","lastTransitionTime":"2026-01-27T20:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.425491 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.425531 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.425571 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.425589 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.425601 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:30Z","lastTransitionTime":"2026-01-27T20:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.527829 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.527873 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.527883 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.527899 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.527910 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:30Z","lastTransitionTime":"2026-01-27T20:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.630167 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.630216 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.630228 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.630245 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.630257 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:30Z","lastTransitionTime":"2026-01-27T20:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.733071 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.733113 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.733124 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.733139 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.733151 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:30Z","lastTransitionTime":"2026-01-27T20:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.802846 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.802846 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:30 crc kubenswrapper[4793]: E0127 20:04:30.802995 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:30 crc kubenswrapper[4793]: E0127 20:04:30.803058 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.836381 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.836418 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.836427 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.836442 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.836451 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:30Z","lastTransitionTime":"2026-01-27T20:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.940060 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.940119 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.940132 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.940152 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:30 crc kubenswrapper[4793]: I0127 20:04:30.940164 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:30Z","lastTransitionTime":"2026-01-27T20:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.043625 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.043712 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.043723 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.043747 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.043761 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:31Z","lastTransitionTime":"2026-01-27T20:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.146402 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.146465 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.146475 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.146492 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.146503 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:31Z","lastTransitionTime":"2026-01-27T20:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.220706 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 11:18:33.414899052 +0000 UTC Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.252298 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.252353 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.252365 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.252382 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.252406 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:31Z","lastTransitionTime":"2026-01-27T20:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.355052 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.355089 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.355101 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.355117 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.355128 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:31Z","lastTransitionTime":"2026-01-27T20:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.459055 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.459102 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.459113 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.459129 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.459139 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:31Z","lastTransitionTime":"2026-01-27T20:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.561954 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.561999 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.562014 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.562036 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.562052 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:31Z","lastTransitionTime":"2026-01-27T20:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.663974 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.664005 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.664015 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.664029 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.664040 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:31Z","lastTransitionTime":"2026-01-27T20:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.767201 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.767242 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.767253 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.767270 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.767282 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:31Z","lastTransitionTime":"2026-01-27T20:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.803094 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.803237 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:31 crc kubenswrapper[4793]: E0127 20:04:31.803272 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:31 crc kubenswrapper[4793]: E0127 20:04:31.803449 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.870097 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.870152 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.870168 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.870190 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.870208 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:31Z","lastTransitionTime":"2026-01-27T20:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.972457 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.972492 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.972505 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.972522 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:31 crc kubenswrapper[4793]: I0127 20:04:31.972536 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:31Z","lastTransitionTime":"2026-01-27T20:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.074709 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.074761 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.074775 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.074792 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.074803 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:32Z","lastTransitionTime":"2026-01-27T20:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.177088 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.177172 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.177187 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.177212 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.177227 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:32Z","lastTransitionTime":"2026-01-27T20:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.220962 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 02:25:58.329582475 +0000 UTC Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.279520 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.279582 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.279595 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.279615 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.279627 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:32Z","lastTransitionTime":"2026-01-27T20:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.381985 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.382040 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.382051 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.382067 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.382080 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:32Z","lastTransitionTime":"2026-01-27T20:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.484825 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.484921 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.484935 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.484953 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.484970 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:32Z","lastTransitionTime":"2026-01-27T20:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.588313 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.588366 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.588381 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.588398 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.588409 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:32Z","lastTransitionTime":"2026-01-27T20:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.690961 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.690999 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.691010 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.691026 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.691035 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:32Z","lastTransitionTime":"2026-01-27T20:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.793171 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.793233 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.793254 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.793286 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.793307 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:32Z","lastTransitionTime":"2026-01-27T20:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.802508 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.802607 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:32 crc kubenswrapper[4793]: E0127 20:04:32.802686 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:32 crc kubenswrapper[4793]: E0127 20:04:32.803470 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.896001 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.896085 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.896099 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.896126 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.896140 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:32Z","lastTransitionTime":"2026-01-27T20:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.998182 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.998242 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.998252 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.998267 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:32 crc kubenswrapper[4793]: I0127 20:04:32.998276 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:32Z","lastTransitionTime":"2026-01-27T20:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.100067 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.100117 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.100130 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.100147 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.100161 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:33Z","lastTransitionTime":"2026-01-27T20:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.202615 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.202649 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.202657 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.202671 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.202680 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:33Z","lastTransitionTime":"2026-01-27T20:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.221379 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 15:18:00.966308255 +0000 UTC Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.304833 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.304901 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.304934 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.304972 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.304998 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:33Z","lastTransitionTime":"2026-01-27T20:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.407642 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.407679 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.407687 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.407700 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.407721 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:33Z","lastTransitionTime":"2026-01-27T20:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.509869 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.509906 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.509917 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.509930 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.509940 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:33Z","lastTransitionTime":"2026-01-27T20:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.611921 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.611955 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.611966 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.611980 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.611991 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:33Z","lastTransitionTime":"2026-01-27T20:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.714300 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.714334 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.714345 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.714358 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.714368 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:33Z","lastTransitionTime":"2026-01-27T20:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.802788 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.802866 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:33 crc kubenswrapper[4793]: E0127 20:04:33.802975 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:33 crc kubenswrapper[4793]: E0127 20:04:33.803070 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.816986 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.817027 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.817036 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.817050 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.817059 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:33Z","lastTransitionTime":"2026-01-27T20:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.919410 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.919456 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.919467 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.919482 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:33 crc kubenswrapper[4793]: I0127 20:04:33.919492 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:33Z","lastTransitionTime":"2026-01-27T20:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.023170 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.023233 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.023253 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.023283 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.023303 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:34Z","lastTransitionTime":"2026-01-27T20:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.126044 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.126083 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.126096 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.126111 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.126121 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:34Z","lastTransitionTime":"2026-01-27T20:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.222293 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 07:30:36.933959346 +0000 UTC Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.228924 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.228988 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.229005 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.229027 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.229041 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:34Z","lastTransitionTime":"2026-01-27T20:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.252971 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.253056 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.253082 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.253116 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.253139 4793 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T20:04:34Z","lastTransitionTime":"2026-01-27T20:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.308758 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv"] Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.309417 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.311782 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.312185 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.312192 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.312288 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.410521 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/08ad1ac9-4143-43bc-b031-578324934321-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-4zgzv\" (UID: \"08ad1ac9-4143-43bc-b031-578324934321\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.410614 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08ad1ac9-4143-43bc-b031-578324934321-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-4zgzv\" (UID: \"08ad1ac9-4143-43bc-b031-578324934321\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.410646 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/08ad1ac9-4143-43bc-b031-578324934321-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-4zgzv\" (UID: \"08ad1ac9-4143-43bc-b031-578324934321\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.410689 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/08ad1ac9-4143-43bc-b031-578324934321-service-ca\") pod \"cluster-version-operator-5c965bbfc6-4zgzv\" (UID: \"08ad1ac9-4143-43bc-b031-578324934321\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.410714 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/08ad1ac9-4143-43bc-b031-578324934321-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-4zgzv\" (UID: \"08ad1ac9-4143-43bc-b031-578324934321\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.511889 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/08ad1ac9-4143-43bc-b031-578324934321-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-4zgzv\" (UID: \"08ad1ac9-4143-43bc-b031-578324934321\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc 
kubenswrapper[4793]: I0127 20:04:34.512296 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08ad1ac9-4143-43bc-b031-578324934321-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-4zgzv\" (UID: \"08ad1ac9-4143-43bc-b031-578324934321\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.512394 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/08ad1ac9-4143-43bc-b031-578324934321-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-4zgzv\" (UID: \"08ad1ac9-4143-43bc-b031-578324934321\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.512435 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/08ad1ac9-4143-43bc-b031-578324934321-service-ca\") pod \"cluster-version-operator-5c965bbfc6-4zgzv\" (UID: \"08ad1ac9-4143-43bc-b031-578324934321\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.512494 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/08ad1ac9-4143-43bc-b031-578324934321-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-4zgzv\" (UID: \"08ad1ac9-4143-43bc-b031-578324934321\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.512729 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/08ad1ac9-4143-43bc-b031-578324934321-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-4zgzv\" (UID: \"08ad1ac9-4143-43bc-b031-578324934321\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.512733 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/08ad1ac9-4143-43bc-b031-578324934321-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-4zgzv\" (UID: \"08ad1ac9-4143-43bc-b031-578324934321\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.517270 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/08ad1ac9-4143-43bc-b031-578324934321-service-ca\") pod \"cluster-version-operator-5c965bbfc6-4zgzv\" (UID: \"08ad1ac9-4143-43bc-b031-578324934321\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.520928 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08ad1ac9-4143-43bc-b031-578324934321-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-4zgzv\" (UID: \"08ad1ac9-4143-43bc-b031-578324934321\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.532448 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" 
(UniqueName: \"kubernetes.io/projected/08ad1ac9-4143-43bc-b031-578324934321-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-4zgzv\" (UID: \"08ad1ac9-4143-43bc-b031-578324934321\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.624817 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.802195 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:34 crc kubenswrapper[4793]: E0127 20:04:34.802339 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:34 crc kubenswrapper[4793]: I0127 20:04:34.802446 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:34 crc kubenswrapper[4793]: E0127 20:04:34.802599 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:35 crc kubenswrapper[4793]: I0127 20:04:35.223441 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 22:31:23.567678059 +0000 UTC Jan 27 20:04:35 crc kubenswrapper[4793]: I0127 20:04:35.223487 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 27 20:04:35 crc kubenswrapper[4793]: I0127 20:04:35.233653 4793 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 27 20:04:35 crc kubenswrapper[4793]: I0127 20:04:35.516017 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" event={"ID":"08ad1ac9-4143-43bc-b031-578324934321","Type":"ContainerStarted","Data":"23d553825581bc1b9ae18bc6ba515ba549ad17bd18777486f14c93d620f086ce"} Jan 27 20:04:35 crc kubenswrapper[4793]: I0127 20:04:35.516072 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" event={"ID":"08ad1ac9-4143-43bc-b031-578324934321","Type":"ContainerStarted","Data":"81d7f1daea737dd7c6af5f0e9328b9a7390773ff73579fee9fe9e2b8a58b919c"} Jan 27 20:04:35 crc kubenswrapper[4793]: I0127 20:04:35.526302 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs\") pod \"network-metrics-daemon-gsrf9\" (UID: \"93412db5-52e2-4b3a-aee4-3c43f090750e\") " pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:35 crc 
kubenswrapper[4793]: E0127 20:04:35.526471 4793 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 20:04:35 crc kubenswrapper[4793]: E0127 20:04:35.526533 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs podName:93412db5-52e2-4b3a-aee4-3c43f090750e nodeName:}" failed. No retries permitted until 2026-01-27 20:05:39.526516107 +0000 UTC m=+164.916769263 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs") pod "network-metrics-daemon-gsrf9" (UID: "93412db5-52e2-4b3a-aee4-3c43f090750e") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 20:04:35 crc kubenswrapper[4793]: I0127 20:04:35.537128 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-4zgzv" podStartSLOduration=78.537111533 podStartE2EDuration="1m18.537111533s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:04:35.535772024 +0000 UTC m=+100.926025190" watchObservedRunningTime="2026-01-27 20:04:35.537111533 +0000 UTC m=+100.927364689" Jan 27 20:04:35 crc kubenswrapper[4793]: I0127 20:04:35.802734 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:35 crc kubenswrapper[4793]: I0127 20:04:35.802753 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:35 crc kubenswrapper[4793]: E0127 20:04:35.803995 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:35 crc kubenswrapper[4793]: E0127 20:04:35.804344 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:36 crc kubenswrapper[4793]: I0127 20:04:36.802400 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:36 crc kubenswrapper[4793]: I0127 20:04:36.802406 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:36 crc kubenswrapper[4793]: E0127 20:04:36.802567 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:36 crc kubenswrapper[4793]: E0127 20:04:36.802702 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:37 crc kubenswrapper[4793]: I0127 20:04:37.802641 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:37 crc kubenswrapper[4793]: I0127 20:04:37.802686 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:37 crc kubenswrapper[4793]: E0127 20:04:37.802843 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:37 crc kubenswrapper[4793]: E0127 20:04:37.802930 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:38 crc kubenswrapper[4793]: I0127 20:04:38.802364 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:38 crc kubenswrapper[4793]: I0127 20:04:38.802365 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:38 crc kubenswrapper[4793]: E0127 20:04:38.802515 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:38 crc kubenswrapper[4793]: E0127 20:04:38.802677 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:39 crc kubenswrapper[4793]: I0127 20:04:39.803093 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:39 crc kubenswrapper[4793]: E0127 20:04:39.803229 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:39 crc kubenswrapper[4793]: I0127 20:04:39.803356 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:39 crc kubenswrapper[4793]: E0127 20:04:39.803659 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:40 crc kubenswrapper[4793]: I0127 20:04:40.802661 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:40 crc kubenswrapper[4793]: I0127 20:04:40.802661 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:40 crc kubenswrapper[4793]: E0127 20:04:40.803143 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:40 crc kubenswrapper[4793]: E0127 20:04:40.803282 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:41 crc kubenswrapper[4793]: I0127 20:04:41.802719 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:41 crc kubenswrapper[4793]: I0127 20:04:41.802749 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:41 crc kubenswrapper[4793]: E0127 20:04:41.802961 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:41 crc kubenswrapper[4793]: E0127 20:04:41.803097 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:42 crc kubenswrapper[4793]: I0127 20:04:42.803088 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:42 crc kubenswrapper[4793]: E0127 20:04:42.803324 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:42 crc kubenswrapper[4793]: I0127 20:04:42.803446 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:42 crc kubenswrapper[4793]: E0127 20:04:42.803791 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:43 crc kubenswrapper[4793]: I0127 20:04:43.802872 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:43 crc kubenswrapper[4793]: E0127 20:04:43.803192 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:43 crc kubenswrapper[4793]: I0127 20:04:43.802772 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:43 crc kubenswrapper[4793]: E0127 20:04:43.803780 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:44 crc kubenswrapper[4793]: I0127 20:04:44.802778 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:44 crc kubenswrapper[4793]: I0127 20:04:44.802877 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:44 crc kubenswrapper[4793]: E0127 20:04:44.803000 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:44 crc kubenswrapper[4793]: E0127 20:04:44.803294 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:44 crc kubenswrapper[4793]: I0127 20:04:44.804267 4793 scope.go:117] "RemoveContainer" containerID="1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5" Jan 27 20:04:44 crc kubenswrapper[4793]: E0127 20:04:44.804865 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-8glmz_openshift-ovn-kubernetes(4fb300f8-bf40-4c4e-a3e5-4d5149177aae)\"" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" Jan 27 20:04:45 crc kubenswrapper[4793]: I0127 20:04:45.803027 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:45 crc kubenswrapper[4793]: I0127 20:04:45.803173 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:45 crc kubenswrapper[4793]: E0127 20:04:45.804995 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:45 crc kubenswrapper[4793]: E0127 20:04:45.805173 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:46 crc kubenswrapper[4793]: I0127 20:04:46.802190 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:46 crc kubenswrapper[4793]: I0127 20:04:46.802203 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:46 crc kubenswrapper[4793]: E0127 20:04:46.802776 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:46 crc kubenswrapper[4793]: E0127 20:04:46.803071 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:47 crc kubenswrapper[4793]: I0127 20:04:47.803173 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:47 crc kubenswrapper[4793]: I0127 20:04:47.803173 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:47 crc kubenswrapper[4793]: E0127 20:04:47.803917 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:47 crc kubenswrapper[4793]: E0127 20:04:47.804023 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:48 crc kubenswrapper[4793]: I0127 20:04:48.802475 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:48 crc kubenswrapper[4793]: I0127 20:04:48.802488 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:48 crc kubenswrapper[4793]: E0127 20:04:48.802660 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:48 crc kubenswrapper[4793]: E0127 20:04:48.802810 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:49 crc kubenswrapper[4793]: I0127 20:04:49.802446 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:49 crc kubenswrapper[4793]: I0127 20:04:49.802524 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:49 crc kubenswrapper[4793]: E0127 20:04:49.802649 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:49 crc kubenswrapper[4793]: E0127 20:04:49.802756 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:50 crc kubenswrapper[4793]: I0127 20:04:50.802114 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:50 crc kubenswrapper[4793]: I0127 20:04:50.802267 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:50 crc kubenswrapper[4793]: E0127 20:04:50.802359 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:50 crc kubenswrapper[4793]: E0127 20:04:50.802486 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:51 crc kubenswrapper[4793]: I0127 20:04:51.803330 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:51 crc kubenswrapper[4793]: I0127 20:04:51.803513 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:51 crc kubenswrapper[4793]: E0127 20:04:51.803670 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:51 crc kubenswrapper[4793]: E0127 20:04:51.803861 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:52 crc kubenswrapper[4793]: I0127 20:04:52.803028 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:52 crc kubenswrapper[4793]: E0127 20:04:52.803173 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:52 crc kubenswrapper[4793]: I0127 20:04:52.803240 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:52 crc kubenswrapper[4793]: E0127 20:04:52.803379 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:53 crc kubenswrapper[4793]: I0127 20:04:53.802721 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:53 crc kubenswrapper[4793]: E0127 20:04:53.802836 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:53 crc kubenswrapper[4793]: I0127 20:04:53.802885 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:53 crc kubenswrapper[4793]: E0127 20:04:53.803023 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:54 crc kubenswrapper[4793]: I0127 20:04:54.803165 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:54 crc kubenswrapper[4793]: E0127 20:04:54.803328 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:54 crc kubenswrapper[4793]: I0127 20:04:54.803390 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:54 crc kubenswrapper[4793]: E0127 20:04:54.803525 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:55 crc kubenswrapper[4793]: I0127 20:04:55.579741 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7k9v7_d3e7b749-a397-4db6-8b6e-ddde6b3fdced/kube-multus/1.log" Jan 27 20:04:55 crc kubenswrapper[4793]: I0127 20:04:55.580365 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7k9v7_d3e7b749-a397-4db6-8b6e-ddde6b3fdced/kube-multus/0.log" Jan 27 20:04:55 crc kubenswrapper[4793]: I0127 20:04:55.580413 4793 generic.go:334] "Generic (PLEG): container finished" podID="d3e7b749-a397-4db6-8b6e-ddde6b3fdced" containerID="5523d5c8be12ef4b6f5d0b544383354aecb339148cf8dff40faff81570a033dd" exitCode=1 Jan 27 20:04:55 crc kubenswrapper[4793]: I0127 20:04:55.580443 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-7k9v7" event={"ID":"d3e7b749-a397-4db6-8b6e-ddde6b3fdced","Type":"ContainerDied","Data":"5523d5c8be12ef4b6f5d0b544383354aecb339148cf8dff40faff81570a033dd"} Jan 27 20:04:55 crc kubenswrapper[4793]: I0127 20:04:55.580474 4793 scope.go:117] "RemoveContainer" containerID="d5ed61eae20801164518bcad9500d782ed62a908c6ec093e053416ea29e0c05b" Jan 27 20:04:55 crc kubenswrapper[4793]: I0127 20:04:55.581142 4793 scope.go:117] "RemoveContainer" containerID="5523d5c8be12ef4b6f5d0b544383354aecb339148cf8dff40faff81570a033dd" Jan 27 20:04:55 crc kubenswrapper[4793]: E0127 20:04:55.581438 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-7k9v7_openshift-multus(d3e7b749-a397-4db6-8b6e-ddde6b3fdced)\"" pod="openshift-multus/multus-7k9v7" podUID="d3e7b749-a397-4db6-8b6e-ddde6b3fdced" Jan 27 20:04:55 crc kubenswrapper[4793]: E0127 20:04:55.758189 4793 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Jan 27 20:04:55 crc kubenswrapper[4793]: I0127 20:04:55.803008 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:55 crc kubenswrapper[4793]: I0127 20:04:55.803031 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:55 crc kubenswrapper[4793]: E0127 20:04:55.804128 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:55 crc kubenswrapper[4793]: E0127 20:04:55.804291 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:56 crc kubenswrapper[4793]: E0127 20:04:56.070906 4793 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 27 20:04:56 crc kubenswrapper[4793]: I0127 20:04:56.585638 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7k9v7_d3e7b749-a397-4db6-8b6e-ddde6b3fdced/kube-multus/1.log" Jan 27 20:04:56 crc kubenswrapper[4793]: I0127 20:04:56.803318 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:56 crc kubenswrapper[4793]: I0127 20:04:56.803391 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:56 crc kubenswrapper[4793]: E0127 20:04:56.803469 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:56 crc kubenswrapper[4793]: E0127 20:04:56.803637 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:57 crc kubenswrapper[4793]: I0127 20:04:57.803266 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:57 crc kubenswrapper[4793]: I0127 20:04:57.803294 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:57 crc kubenswrapper[4793]: E0127 20:04:57.803454 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:57 crc kubenswrapper[4793]: E0127 20:04:57.803611 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:04:57 crc kubenswrapper[4793]: I0127 20:04:57.804277 4793 scope.go:117] "RemoveContainer" containerID="1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5" Jan 27 20:04:58 crc kubenswrapper[4793]: I0127 20:04:58.593413 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovnkube-controller/3.log" Jan 27 20:04:58 crc kubenswrapper[4793]: I0127 20:04:58.595748 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerStarted","Data":"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f"} Jan 27 20:04:58 crc kubenswrapper[4793]: I0127 20:04:58.597410 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:04:58 crc kubenswrapper[4793]: I0127 20:04:58.634130 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" podStartSLOduration=101.634105674 podStartE2EDuration="1m41.634105674s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:04:58.631043209 +0000 UTC m=+124.021296365" watchObservedRunningTime="2026-01-27 20:04:58.634105674 +0000 UTC m=+124.024358830" Jan 27 20:04:58 crc kubenswrapper[4793]: I0127 20:04:58.803140 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:04:58 crc kubenswrapper[4793]: E0127 20:04:58.803290 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:04:58 crc kubenswrapper[4793]: I0127 20:04:58.803149 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:04:58 crc kubenswrapper[4793]: E0127 20:04:58.803372 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:04:59 crc kubenswrapper[4793]: I0127 20:04:59.039908 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-gsrf9"] Jan 27 20:04:59 crc kubenswrapper[4793]: I0127 20:04:59.040108 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:04:59 crc kubenswrapper[4793]: E0127 20:04:59.040228 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:04:59 crc kubenswrapper[4793]: I0127 20:04:59.802489 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:04:59 crc kubenswrapper[4793]: E0127 20:04:59.802663 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:05:00 crc kubenswrapper[4793]: I0127 20:05:00.802469 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:05:00 crc kubenswrapper[4793]: I0127 20:05:00.802529 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:05:00 crc kubenswrapper[4793]: E0127 20:05:00.802633 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:05:00 crc kubenswrapper[4793]: I0127 20:05:00.802720 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:05:00 crc kubenswrapper[4793]: E0127 20:05:00.802877 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:05:00 crc kubenswrapper[4793]: E0127 20:05:00.803020 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:05:01 crc kubenswrapper[4793]: E0127 20:05:01.072126 4793 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" Jan 27 20:05:01 crc kubenswrapper[4793]: I0127 20:05:01.802225 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:05:01 crc kubenswrapper[4793]: E0127 20:05:01.802345 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:05:02 crc kubenswrapper[4793]: I0127 20:05:02.802953 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:05:02 crc kubenswrapper[4793]: I0127 20:05:02.803033 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:05:02 crc kubenswrapper[4793]: E0127 20:05:02.803448 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:05:02 crc kubenswrapper[4793]: I0127 20:05:02.803060 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:05:02 crc kubenswrapper[4793]: E0127 20:05:02.803563 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:05:02 crc kubenswrapper[4793]: E0127 20:05:02.803675 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:05:03 crc kubenswrapper[4793]: I0127 20:05:03.802628 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:05:03 crc kubenswrapper[4793]: E0127 20:05:03.802771 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:05:04 crc kubenswrapper[4793]: I0127 20:05:04.803082 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:05:04 crc kubenswrapper[4793]: I0127 20:05:04.803125 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:05:04 crc kubenswrapper[4793]: I0127 20:05:04.803131 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:05:04 crc kubenswrapper[4793]: E0127 20:05:04.803212 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:05:04 crc kubenswrapper[4793]: E0127 20:05:04.803327 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:05:04 crc kubenswrapper[4793]: E0127 20:05:04.803494 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:05:05 crc kubenswrapper[4793]: I0127 20:05:05.802903 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:05:05 crc kubenswrapper[4793]: E0127 20:05:05.805297 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:05:06 crc kubenswrapper[4793]: E0127 20:05:06.072900 4793 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 27 20:05:06 crc kubenswrapper[4793]: I0127 20:05:06.802697 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:05:06 crc kubenswrapper[4793]: I0127 20:05:06.802756 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:05:06 crc kubenswrapper[4793]: E0127 20:05:06.803176 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:05:06 crc kubenswrapper[4793]: I0127 20:05:06.802756 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:05:06 crc kubenswrapper[4793]: E0127 20:05:06.803806 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:05:06 crc kubenswrapper[4793]: E0127 20:05:06.803312 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:05:07 crc kubenswrapper[4793]: I0127 20:05:07.803026 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:05:07 crc kubenswrapper[4793]: E0127 20:05:07.803343 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:05:07 crc kubenswrapper[4793]: I0127 20:05:07.803475 4793 scope.go:117] "RemoveContainer" containerID="5523d5c8be12ef4b6f5d0b544383354aecb339148cf8dff40faff81570a033dd" Jan 27 20:05:08 crc kubenswrapper[4793]: I0127 20:05:08.630484 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7k9v7_d3e7b749-a397-4db6-8b6e-ddde6b3fdced/kube-multus/1.log" Jan 27 20:05:08 crc kubenswrapper[4793]: I0127 20:05:08.630572 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-7k9v7" event={"ID":"d3e7b749-a397-4db6-8b6e-ddde6b3fdced","Type":"ContainerStarted","Data":"b9a77f189b3970dae25374aabc946e103c7d2dca0881cb4c4fc87338fc15237a"} Jan 27 20:05:08 crc kubenswrapper[4793]: I0127 20:05:08.802623 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:05:08 crc kubenswrapper[4793]: I0127 20:05:08.802704 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:05:08 crc kubenswrapper[4793]: I0127 20:05:08.802765 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:05:08 crc kubenswrapper[4793]: E0127 20:05:08.802856 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:05:08 crc kubenswrapper[4793]: E0127 20:05:08.803117 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:05:08 crc kubenswrapper[4793]: E0127 20:05:08.803167 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:05:09 crc kubenswrapper[4793]: I0127 20:05:09.802713 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:05:09 crc kubenswrapper[4793]: E0127 20:05:09.803186 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 20:05:10 crc kubenswrapper[4793]: I0127 20:05:10.802189 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:05:10 crc kubenswrapper[4793]: I0127 20:05:10.802246 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:05:10 crc kubenswrapper[4793]: I0127 20:05:10.802249 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:05:10 crc kubenswrapper[4793]: E0127 20:05:10.802344 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 20:05:10 crc kubenswrapper[4793]: E0127 20:05:10.802401 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gsrf9" podUID="93412db5-52e2-4b3a-aee4-3c43f090750e" Jan 27 20:05:10 crc kubenswrapper[4793]: E0127 20:05:10.802464 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 20:05:11 crc kubenswrapper[4793]: I0127 20:05:11.802479 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:05:11 crc kubenswrapper[4793]: I0127 20:05:11.804478 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 27 20:05:11 crc kubenswrapper[4793]: I0127 20:05:11.804879 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 27 20:05:12 crc kubenswrapper[4793]: I0127 20:05:12.802806 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:05:12 crc kubenswrapper[4793]: I0127 20:05:12.802829 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 20:05:12 crc kubenswrapper[4793]: I0127 20:05:12.802830 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:05:12 crc kubenswrapper[4793]: I0127 20:05:12.805054 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 27 20:05:12 crc kubenswrapper[4793]: I0127 20:05:12.805191 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 27 20:05:12 crc kubenswrapper[4793]: I0127 20:05:12.805263 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 27 20:05:12 crc kubenswrapper[4793]: I0127 20:05:12.806400 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.004611 4793 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.040247 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-v8r5p"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.040746 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.044495 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.044598 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.044695 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.044760 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.044797 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.045141 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.072986 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.075839 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.075841 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-sghkk"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.076592 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.077366 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.077815 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.077905 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.078539 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.078845 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-swb5p"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.079340 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-swb5p" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.081700 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.082014 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.082272 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.082599 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.082911 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.083442 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.088773 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090023 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090021 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/af46e162-c595-4a44-98e5-a30e531aa9ed-etcd-client\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090216 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhzt7\" (UniqueName: \"kubernetes.io/projected/af46e162-c595-4a44-98e5-a30e531aa9ed-kube-api-access-xhzt7\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090256 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/af46e162-c595-4a44-98e5-a30e531aa9ed-encryption-config\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090293 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af46e162-c595-4a44-98e5-a30e531aa9ed-serving-cert\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090147 4793 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090365 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/af46e162-c595-4a44-98e5-a30e531aa9ed-etcd-serving-ca\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090394 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af46e162-c595-4a44-98e5-a30e531aa9ed-config\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090418 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/af46e162-c595-4a44-98e5-a30e531aa9ed-audit-dir\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090174 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090469 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/af46e162-c595-4a44-98e5-a30e531aa9ed-image-import-ca\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090490 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/af46e162-c595-4a44-98e5-a30e531aa9ed-node-pullsecrets\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090523 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/af46e162-c595-4a44-98e5-a30e531aa9ed-audit\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090193 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090601 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/af46e162-c595-4a44-98e5-a30e531aa9ed-trusted-ca-bundle\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090232 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 27 
20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090237 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.090292 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.091851 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.091998 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.092116 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.092168 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.092020 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.092313 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.092039 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.092056 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.092078 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.093268 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.093380 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.093500 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.093521 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.093628 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.098325 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.098395 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: 
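[Editor's note] The flood of "Caches populated for *v1.Secret/*v1.ConfigMap from object-..." lines marks client-go reflectors finishing their initial list/watch for each Secret and ConfigMap a pod references. As a rough illustration of the mechanism (a generic shared informer; the kubelet actually runs narrower per-object reflectors, so treat names and scope here as assumptions):

```go
// Sketch of the list/watch-then-sync cycle behind "Caches populated".
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes in-cluster credentials
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Namespace chosen from the log for illustration.
	factory := informers.NewSharedInformerFactoryWithOptions(
		client, 10*time.Minute,
		informers.WithNamespace("openshift-apiserver"),
	)
	cmInformer := factory.Core().V1().ConfigMaps().Informer()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)

	// Equivalent in spirit to reflector.go's "Caches populated" line.
	if !cache.WaitForCacheSync(stop, cmInformer.HasSynced) {
		panic("cache never synced")
	}
	fmt.Println("Caches populated for *v1.ConfigMap")
}
```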
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.098965 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.098755 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.098777 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.099336 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.099587 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.100406 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v8ht5"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.100836 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-slbcq"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.101191 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-slbcq"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.101583 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.102302 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.102705 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.102983 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.103147 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.103401 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-n24xk"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.103945 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.104618 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-4nvlb"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.105152 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-4nvlb"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.105793 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.106132 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.107041 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-2qgvl"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.107766 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2qgvl"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.114867 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.117028 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.117183 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.118830 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.119001 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.119114 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.119255 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.119387 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.119527 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.119690 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.119830 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.119971 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.120942 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.121211 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.121390 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.121417 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.121583 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.121654 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.121776 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.121893 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.122045 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.122193 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.122390 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.122593 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.143260 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-jtmmw"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.161758 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.162217 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.162811 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.162923 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.163137 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.163401 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.163423 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.163590 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.163743 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.163815 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.164125 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.164376 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.164512 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.164660 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.165290 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.165496 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.166352 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.166989 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.167134 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.167186 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.167277 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.167301 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.167494 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.168503 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.170386 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.171764 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.171792 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.174877 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.176840 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-c22sx"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.177232 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.177483 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4lgxd"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.177812 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4lgxd"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.178255 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.178450 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.180955 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-89kfv"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.181604 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-9fj82"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.181903 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.182226 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.182448 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-89kfv"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.182617 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-9fj82"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.182910 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v"]
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.183499 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.183503 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.183504 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.183522 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.183576 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.188025 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.188051 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.188243 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.188322 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.188361 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.188601 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.188720 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.188819 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.188897 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.188964 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.190984 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/af46e162-c595-4a44-98e5-a30e531aa9ed-node-pullsecrets\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.191027 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/af46e162-c595-4a44-98e5-a30e531aa9ed-audit\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.191081 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/af46e162-c595-4a44-98e5-a30e531aa9ed-trusted-ca-bundle\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.191106 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/af46e162-c595-4a44-98e5-a30e531aa9ed-etcd-client\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.191131 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhzt7\" (UniqueName: \"kubernetes.io/projected/af46e162-c595-4a44-98e5-a30e531aa9ed-kube-api-access-xhzt7\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.191155 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/af46e162-c595-4a44-98e5-a30e531aa9ed-encryption-config\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.191176 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af46e162-c595-4a44-98e5-a30e531aa9ed-serving-cert\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.191200 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/af46e162-c595-4a44-98e5-a30e531aa9ed-etcd-serving-ca\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.191223 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af46e162-c595-4a44-98e5-a30e531aa9ed-config\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.191246 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/af46e162-c595-4a44-98e5-a30e531aa9ed-audit-dir\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.191284 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/af46e162-c595-4a44-98e5-a30e531aa9ed-image-import-ca\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.191468 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/af46e162-c595-4a44-98e5-a30e531aa9ed-node-pullsecrets\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.192313 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/af46e162-c595-4a44-98e5-a30e531aa9ed-image-import-ca\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.195490 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/af46e162-c595-4a44-98e5-a30e531aa9ed-audit\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.195645 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/af46e162-c595-4a44-98e5-a30e531aa9ed-audit-dir\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.196255 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af46e162-c595-4a44-98e5-a30e531aa9ed-config\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.196343 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/af46e162-c595-4a44-98e5-a30e531aa9ed-etcd-serving-ca\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.196487 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/af46e162-c595-4a44-98e5-a30e531aa9ed-trusted-ca-bundle\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.198002 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/af46e162-c595-4a44-98e5-a30e531aa9ed-etcd-client\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.198440 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af46e162-c595-4a44-98e5-a30e531aa9ed-serving-cert\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p"
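[Editor's note] The VerifyControllerAttachedVolume, "MountVolume started", and "MountVolume.SetUp succeeded" progression above is the volume manager preparing apiserver-76f77b778f-v8r5p's volumes before its sandbox starts. Once SetUp succeeds, each volume is materialized on disk under the kubelet root. A hedged sketch for inspecting that layout on the node, assuming the default --root-dir of /var/lib/kubelet:

```go
// Illustrative only: list the per-pod volume directories corresponding to
// the "MountVolume.SetUp succeeded" entries above. Run on the node itself.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Pod UID as logged for apiserver-76f77b778f-v8r5p.
	uid := "af46e162-c595-4a44-98e5-a30e531aa9ed"
	base := filepath.Join("/var/lib/kubelet/pods", uid, "volumes")

	// Layout: volumes/<plugin>/<volumeName>, e.g.
	// kubernetes.io~secret/serving-cert, kubernetes.io~configmap/audit.
	plugins, err := os.ReadDir(base)
	if err != nil {
		fmt.Println("volumes not set up yet:", err)
		return
	}
	for _, p := range plugins {
		vols, _ := os.ReadDir(filepath.Join(base, p.Name()))
		for _, v := range vols {
			fmt.Printf("%s/%s\n", p.Name(), v.Name())
		}
	}
}
```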
pods=["openshift-console/downloads-7954f5f757-mq8nr"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.199755 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/af46e162-c595-4a44-98e5-a30e531aa9ed-encryption-config\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.200407 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-mq8nr" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.201298 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zw89c"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.201904 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.204324 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.204711 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.206642 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.206795 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.209539 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.213886 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.234035 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-gkcks"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.234133 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.234293 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.235856 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.235916 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.236154 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.236460 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-gkcks" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.236506 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vbqcn"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.236607 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.237277 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.237835 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.237925 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.241542 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.244814 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-gzpt9"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.245029 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.245035 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.245586 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.245722 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-gzpt9" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.245952 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.246334 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.246453 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.248519 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-swb5p"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.251768 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-sghkk"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.252141 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-v8r5p"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.253413 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.254512 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.254669 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.255774 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.262650 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.262725 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-sn9b2"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.263673 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-6kf4l"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.263923 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-sn9b2" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.264195 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.264308 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-6kf4l" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.265360 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.266476 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-slbcq"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.267760 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4lgxd"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.268283 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.269071 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.269191 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v8ht5"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.270611 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.271628 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-2qgvl"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.272840 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-4nvlb"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.273861 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.274808 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vbqcn"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.276043 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-jtmmw"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.278672 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.279398 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-c22sx"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.280907 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-nzg26"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.282912 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.282949 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-89kfv"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.283023 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-nzg26" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.283431 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.283964 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zw89c"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.285229 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-95vph"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.289476 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-n24xk"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.289627 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.293141 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.293742 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.299253 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-gkcks"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.302666 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.303118 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.304457 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.306357 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.307576 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.309011 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-gzpt9"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.310368 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-nzg26"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.312145 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-95vph"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.313484 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.316955 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.318580 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-console/downloads-7954f5f757-mq8nr"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.319955 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.321329 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-sn9b2"] Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.322610 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.343036 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.402956 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.422932 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.443013 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.463499 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.484674 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.502906 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.522971 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.543145 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.563791 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.583749 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.611259 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.624086 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.643961 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.663652 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.683332 4793 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.702257 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.723274 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.743650 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.763169 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.783197 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.803773 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.823140 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.843478 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.862901 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.883041 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.902536 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.923417 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.942988 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.982799 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhzt7\" (UniqueName: \"kubernetes.io/projected/af46e162-c595-4a44-98e5-a30e531aa9ed-kube-api-access-xhzt7\") pod \"apiserver-76f77b778f-v8r5p\" (UID: \"af46e162-c595-4a44-98e5-a30e531aa9ed\") " pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:15 crc kubenswrapper[4793]: I0127 20:05:15.983283 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.002298 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.023183 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 27 20:05:16 crc kubenswrapper[4793]: 
I0127 20:05:16.043155 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.062808 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.084483 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.109402 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.123452 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.142446 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.162889 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.182525 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.202846 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.223101 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.241264 4793 request.go:700] Waited for 1.006623696s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-storage-version-migrator-operator/configmaps?fieldSelector=metadata.name%3Dconfig&limit=500&resourceVersion=0 Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.243011 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.263383 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.281890 4793 util.go:30] "No sandbox for pod can be found. 
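The request.go:700 entry above records client-go's client-side rate limiter holding a LIST request for about a second; as the message itself notes, this is the client's own QPS/Burst throttle, not server-side API Priority and Fairness. A sketch of tuning that limiter on a rest.Config (the kubeconfig path and the QPS/Burst values are illustrative, not a recommendation):

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a kubeconfig; in-cluster code would use rest.InClusterConfig().
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	// Client-side throttling is governed by QPS and Burst on rest.Config.
	// The client-go defaults (5 QPS / 10 burst) produce "Waited for ...
	// due to client-side throttling" messages under load.
	cfg.QPS = 50
	cfg.Burst = 100

	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("client ready: %T\n", clientset)
}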
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.284994 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.303113 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.323822 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.343607 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.364773 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.383011 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.404823 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.429922 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.442813 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.459678 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-v8r5p"] Jan 27 20:05:16 crc kubenswrapper[4793]: W0127 20:05:16.470478 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf46e162_c595_4a44_98e5_a30e531aa9ed.slice/crio-8b7f238e125a5d6b379f9679317b90ca1fc591f3beeb1b7319a358438acc8026 WatchSource:0}: Error finding container 8b7f238e125a5d6b379f9679317b90ca1fc591f3beeb1b7319a358438acc8026: Status 404 returned error can't find the container with id 8b7f238e125a5d6b379f9679317b90ca1fc591f3beeb1b7319a358438acc8026 Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.470573 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.482876 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.503040 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.523173 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.542696 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.563365 4793 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.583309 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.603239 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.623048 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.642679 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.658063 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" event={"ID":"af46e162-c595-4a44-98e5-a30e531aa9ed","Type":"ContainerStarted","Data":"8b7f238e125a5d6b379f9679317b90ca1fc591f3beeb1b7319a358438acc8026"} Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.662449 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.683119 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.703766 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.723144 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.743275 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.763593 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.782987 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.803471 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.822218 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.843674 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.863773 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.884460 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.903768 4793 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-dns"/"dns-default" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.923892 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.943130 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.964282 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 27 20:05:16 crc kubenswrapper[4793]: I0127 20:05:16.983751 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.003325 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.022805 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.043510 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.064166 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.083296 4793 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.103138 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.212935 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3-trusted-ca\") pod \"ingress-operator-5b745b69d9-ljlt5\" (UID: \"25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.212983 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/74c3798b-d26e-4be2-817f-a9004fd819c1-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-swb5p\" (UID: \"74c3798b-d26e-4be2-817f-a9004fd819c1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-swb5p" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213016 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-encryption-config\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213056 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/28a34749-2dfc-4164-a7b9-016f47e098cd-registry-certificates\") pod 
\"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213078 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9xtb\" (UniqueName: \"kubernetes.io/projected/f8b4a330-77aa-4fe5-af50-14f41e5f727e-kube-api-access-l9xtb\") pod \"openshift-apiserver-operator-796bbdcf4f-rpssm\" (UID: \"f8b4a330-77aa-4fe5-af50-14f41e5f727e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213096 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-oauth-serving-cert\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213112 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzzjk\" (UniqueName: \"kubernetes.io/projected/bf2f529a-ec6c-417d-bee8-7520d9b3d41c-kube-api-access-nzzjk\") pod \"machine-config-operator-74547568cd-nqccb\" (UID: \"bf2f529a-ec6c-417d-bee8-7520d9b3d41c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213139 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae-serving-cert\") pod \"console-operator-58897d9998-4nvlb\" (UID: \"1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae\") " pod="openshift-console-operator/console-operator-58897d9998-4nvlb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213156 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa065492-723b-40dd-9259-1a4452804068-client-ca\") pod \"route-controller-manager-6576b87f9c-48bz5\" (UID: \"aa065492-723b-40dd-9259-1a4452804068\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213172 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-etcd-client\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213283 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ea550d07-6ac0-477d-a50d-dba8b5a528a1-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-v64tq\" (UID: \"ea550d07-6ac0-477d-a50d-dba8b5a528a1\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213316 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: 
\"kubernetes.io/empty-dir/30eb86e9-4989-4a95-bd07-07ff6a872298-available-featuregates\") pod \"openshift-config-operator-7777fb866f-2zxw9\" (UID: \"30eb86e9-4989-4a95-bd07-07ff6a872298\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213333 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/8bb08394-518f-4f9c-811d-9bcbf765aad0-etcd-ca\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213354 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jzk2\" (UniqueName: \"kubernetes.io/projected/1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae-kube-api-access-6jzk2\") pod \"console-operator-58897d9998-4nvlb\" (UID: \"1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae\") " pod="openshift-console-operator/console-operator-58897d9998-4nvlb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213383 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f8b4a330-77aa-4fe5-af50-14f41e5f727e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-rpssm\" (UID: \"f8b4a330-77aa-4fe5-af50-14f41e5f727e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213417 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213437 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrx42\" (UniqueName: \"kubernetes.io/projected/c26f1fef-b150-4021-8cfc-c08128248f8a-kube-api-access-mrx42\") pod \"migrator-59844c95c7-2qgvl\" (UID: \"c26f1fef-b150-4021-8cfc-c08128248f8a\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2qgvl" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213454 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bf2f529a-ec6c-417d-bee8-7520d9b3d41c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-nqccb\" (UID: \"bf2f529a-ec6c-417d-bee8-7520d9b3d41c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213484 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8b4a330-77aa-4fe5-af50-14f41e5f727e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-rpssm\" (UID: \"f8b4a330-77aa-4fe5-af50-14f41e5f727e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213512 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-wth5c\" (UniqueName: \"kubernetes.io/projected/8bb08394-518f-4f9c-811d-9bcbf765aad0-kube-api-access-wth5c\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213563 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213600 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/41a53f60-6551-47cf-a063-02a42f9983e9-audit-dir\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213622 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae-trusted-ca\") pod \"console-operator-58897d9998-4nvlb\" (UID: \"1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae\") " pod="openshift-console-operator/console-operator-58897d9998-4nvlb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213699 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213735 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3-metrics-tls\") pod \"ingress-operator-5b745b69d9-ljlt5\" (UID: \"25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213768 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-audit-policies\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213791 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213809 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-audit-policies\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213828 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213861 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa065492-723b-40dd-9259-1a4452804068-config\") pod \"route-controller-manager-6576b87f9c-48bz5\" (UID: \"aa065492-723b-40dd-9259-1a4452804068\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213881 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea550d07-6ac0-477d-a50d-dba8b5a528a1-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-v64tq\" (UID: \"ea550d07-6ac0-477d-a50d-dba8b5a528a1\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213902 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3-bound-sa-token\") pod \"ingress-operator-5b745b69d9-ljlt5\" (UID: \"25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213925 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213950 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8bb08394-518f-4f9c-811d-9bcbf765aad0-etcd-client\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.213980 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/28a34749-2dfc-4164-a7b9-016f47e098cd-installation-pull-secrets\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214011 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sz5fl\" (UniqueName: 
\"kubernetes.io/projected/41a53f60-6551-47cf-a063-02a42f9983e9-kube-api-access-sz5fl\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214037 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-registry-tls\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214063 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214089 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8087e440-2261-4895-a480-c638b7615f67-auth-proxy-config\") pod \"machine-approver-56656f9798-l2x22\" (UID: \"8087e440-2261-4895-a480-c638b7615f67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214114 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-service-ca\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214141 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fp2nj\" (UniqueName: \"kubernetes.io/projected/25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3-kube-api-access-fp2nj\") pod \"ingress-operator-5b745b69d9-ljlt5\" (UID: \"25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214164 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bb08394-518f-4f9c-811d-9bcbf765aad0-config\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214192 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae-config\") pod \"console-operator-58897d9998-4nvlb\" (UID: \"1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae\") " pod="openshift-console-operator/console-operator-58897d9998-4nvlb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214215 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/7be50072-4d5b-4ef3-a534-bdce40d627cb-images\") pod \"machine-api-operator-5694c8668f-sghkk\" 
(UID: \"7be50072-4d5b-4ef3-a534-bdce40d627cb\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214230 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-audit-dir\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214247 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30eb86e9-4989-4a95-bd07-07ff6a872298-serving-cert\") pod \"openshift-config-operator-7777fb866f-2zxw9\" (UID: \"30eb86e9-4989-4a95-bd07-07ff6a872298\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214267 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-trusted-ca-bundle\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214360 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/8087e440-2261-4895-a480-c638b7615f67-machine-approver-tls\") pod \"machine-approver-56656f9798-l2x22\" (UID: \"8087e440-2261-4895-a480-c638b7615f67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214410 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-oauth-config\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214441 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pkzz\" (UniqueName: \"kubernetes.io/projected/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-kube-api-access-4pkzz\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214524 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrptt\" (UniqueName: \"kubernetes.io/projected/aa065492-723b-40dd-9259-1a4452804068-kube-api-access-lrptt\") pod \"route-controller-manager-6576b87f9c-48bz5\" (UID: \"aa065492-723b-40dd-9259-1a4452804068\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214566 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: 
\"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214587 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8087e440-2261-4895-a480-c638b7615f67-config\") pod \"machine-approver-56656f9798-l2x22\" (UID: \"8087e440-2261-4895-a480-c638b7615f67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214621 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzct8\" (UniqueName: \"kubernetes.io/projected/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-kube-api-access-pzct8\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214656 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjhn8\" (UniqueName: \"kubernetes.io/projected/dbcb231b-60c1-4027-b85d-bbdb1c193304-kube-api-access-gjhn8\") pod \"openshift-controller-manager-operator-756b6f6bc6-m6whz\" (UID: \"dbcb231b-60c1-4027-b85d-bbdb1c193304\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214682 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-config\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214731 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214755 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ff5f2\" (UniqueName: \"kubernetes.io/projected/74c3798b-d26e-4be2-817f-a9004fd819c1-kube-api-access-ff5f2\") pod \"cluster-samples-operator-665b6dd947-swb5p\" (UID: \"74c3798b-d26e-4be2-817f-a9004fd819c1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-swb5p" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214784 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/bf2f529a-ec6c-417d-bee8-7520d9b3d41c-images\") pod \"machine-config-operator-74547568cd-nqccb\" (UID: \"bf2f529a-ec6c-417d-bee8-7520d9b3d41c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214811 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7be50072-4d5b-4ef3-a534-bdce40d627cb-config\") pod \"machine-api-operator-5694c8668f-sghkk\" (UID: 
\"7be50072-4d5b-4ef3-a534-bdce40d627cb\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214847 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kb6z7\" (UniqueName: \"kubernetes.io/projected/7be50072-4d5b-4ef3-a534-bdce40d627cb-kube-api-access-kb6z7\") pod \"machine-api-operator-5694c8668f-sghkk\" (UID: \"7be50072-4d5b-4ef3-a534-bdce40d627cb\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214938 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-serving-cert\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.214962 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bf2f529a-ec6c-417d-bee8-7520d9b3d41c-proxy-tls\") pod \"machine-config-operator-74547568cd-nqccb\" (UID: \"bf2f529a-ec6c-417d-bee8-7520d9b3d41c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.215091 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/28a34749-2dfc-4164-a7b9-016f47e098cd-trusted-ca\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.215177 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-bound-sa-token\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.215245 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.215321 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbcb231b-60c1-4027-b85d-bbdb1c193304-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-m6whz\" (UID: \"dbcb231b-60c1-4027-b85d-bbdb1c193304\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.215390 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trhs5\" (UniqueName: \"kubernetes.io/projected/8087e440-2261-4895-a480-c638b7615f67-kube-api-access-trhs5\") pod \"machine-approver-56656f9798-l2x22\" (UID: 
\"8087e440-2261-4895-a480-c638b7615f67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.215448 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-serving-cert\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.215485 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wcgms\" (UniqueName: \"kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-kube-api-access-wcgms\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.215530 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: E0127 20:05:17.215571 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:17.715513832 +0000 UTC m=+143.105767208 (durationBeforeRetry 500ms). 
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.215600 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.215629 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8bb08394-518f-4f9c-811d-9bcbf765aad0-serving-cert\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.215707 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/28a34749-2dfc-4164-a7b9-016f47e098cd-ca-trust-extracted\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.215746 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa065492-723b-40dd-9259-1a4452804068-serving-cert\") pod \"route-controller-manager-6576b87f9c-48bz5\" (UID: \"aa065492-723b-40dd-9259-1a4452804068\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.215774 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbcb231b-60c1-4027-b85d-bbdb1c193304-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-m6whz\" (UID: \"dbcb231b-60c1-4027-b85d-bbdb1c193304\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.215801 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kns7n\" (UniqueName: \"kubernetes.io/projected/30eb86e9-4989-4a95-bd07-07ff6a872298-kube-api-access-kns7n\") pod \"openshift-config-operator-7777fb866f-2zxw9\" (UID: \"30eb86e9-4989-4a95-bd07-07ff6a872298\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.215967 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/7be50072-4d5b-4ef3-a534-bdce40d627cb-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-sghkk\" (UID: \"7be50072-4d5b-4ef3-a534-bdce40d627cb\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.216010 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea550d07-6ac0-477d-a50d-dba8b5a528a1-config\") pod \"kube-controller-manager-operator-78b949d7b-v64tq\" (UID: \"ea550d07-6ac0-477d-a50d-dba8b5a528a1\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.216049 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.216072 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.216093 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/8bb08394-518f-4f9c-811d-9bcbf765aad0-etcd-service-ca\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.317537 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:17 crc kubenswrapper[4793]: E0127 20:05:17.317734 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:17.817691075 +0000 UTC m=+143.207944231 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.317809 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3-metrics-tls\") pod \"ingress-operator-5b745b69d9-ljlt5\" (UID: \"25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.317860 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-audit-policies\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.317894 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/30a645a0-5c78-4576-ba66-56e95fb6b07a-node-bootstrap-token\") pod \"machine-config-server-6kf4l\" (UID: \"30a645a0-5c78-4576-ba66-56e95fb6b07a\") " pod="openshift-machine-config-operator/machine-config-server-6kf4l" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.317924 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/458d18eb-2c58-43d6-889b-b1ce6f367050-apiservice-cert\") pod \"packageserver-d55dfcdfc-jtc2j\" (UID: \"458d18eb-2c58-43d6-889b-b1ce6f367050\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.317957 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lsw8l\" (UniqueName: \"kubernetes.io/projected/30a645a0-5c78-4576-ba66-56e95fb6b07a-kube-api-access-lsw8l\") pod \"machine-config-server-6kf4l\" (UID: \"30a645a0-5c78-4576-ba66-56e95fb6b07a\") " pod="openshift-machine-config-operator/machine-config-server-6kf4l" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.317978 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8bb08394-518f-4f9c-811d-9bcbf765aad0-etcd-client\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.318002 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/28a34749-2dfc-4164-a7b9-016f47e098cd-installation-pull-secrets\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.318023 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-sz5fl\" (UniqueName: \"kubernetes.io/projected/41a53f60-6551-47cf-a063-02a42f9983e9-kube-api-access-sz5fl\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.318044 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-registry-tls\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.318141 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.318201 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/02367c13-c0f2-4600-9baa-1a55f0f50e8b-profile-collector-cert\") pod \"olm-operator-6b444d44fb-vlz9q\" (UID: \"02367c13-c0f2-4600-9baa-1a55f0f50e8b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.318232 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fp2nj\" (UniqueName: \"kubernetes.io/projected/25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3-kube-api-access-fp2nj\") pod \"ingress-operator-5b745b69d9-ljlt5\" (UID: \"25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.318259 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/30a645a0-5c78-4576-ba66-56e95fb6b07a-certs\") pod \"machine-config-server-6kf4l\" (UID: \"30a645a0-5c78-4576-ba66-56e95fb6b07a\") " pod="openshift-machine-config-operator/machine-config-server-6kf4l" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.318284 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hb9c4\" (UniqueName: \"kubernetes.io/projected/02367c13-c0f2-4600-9baa-1a55f0f50e8b-kube-api-access-hb9c4\") pod \"olm-operator-6b444d44fb-vlz9q\" (UID: \"02367c13-c0f2-4600-9baa-1a55f0f50e8b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.318306 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10cac1f8-9f3e-43a3-9061-ff12b7cecbb2-config\") pod \"service-ca-operator-777779d784-m6tz2\" (UID: \"10cac1f8-9f3e-43a3-9061-ff12b7cecbb2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.318334 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/7be50072-4d5b-4ef3-a534-bdce40d627cb-images\") pod 
\"machine-api-operator-5694c8668f-sghkk\" (UID: \"7be50072-4d5b-4ef3-a534-bdce40d627cb\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.318803 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-audit-policies\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.318973 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-trusted-ca-bundle\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319019 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bb08394-518f-4f9c-811d-9bcbf765aad0-config\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319046 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-oauth-config\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319067 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/104d79b7-c0c6-4cde-a7c2-d60a06a38647-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-htfcv\" (UID: \"104d79b7-c0c6-4cde-a7c2-d60a06a38647\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319096 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrptt\" (UniqueName: \"kubernetes.io/projected/aa065492-723b-40dd-9259-1a4452804068-kube-api-access-lrptt\") pod \"route-controller-manager-6576b87f9c-48bz5\" (UID: \"aa065492-723b-40dd-9259-1a4452804068\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319120 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-vbqcn\" (UID: \"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187\") " pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319150 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c2f53dac-143e-4ab4-b76d-cc7bf64f8df4-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-shv5g\" (UID: \"c2f53dac-143e-4ab4-b76d-cc7bf64f8df4\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319180 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319215 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ff5f2\" (UniqueName: \"kubernetes.io/projected/74c3798b-d26e-4be2-817f-a9004fd819c1-kube-api-access-ff5f2\") pod \"cluster-samples-operator-665b6dd947-swb5p\" (UID: \"74c3798b-d26e-4be2-817f-a9004fd819c1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-swb5p" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319239 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/458d18eb-2c58-43d6-889b-b1ce6f367050-tmpfs\") pod \"packageserver-d55dfcdfc-jtc2j\" (UID: \"458d18eb-2c58-43d6-889b-b1ce6f367050\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319267 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ab3e57d5-0315-4083-b0d8-80af81ba8ea0-metrics-certs\") pod \"router-default-5444994796-9fj82\" (UID: \"ab3e57d5-0315-4083-b0d8-80af81ba8ea0\") " pod="openshift-ingress/router-default-5444994796-9fj82" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319292 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/bf2f529a-ec6c-417d-bee8-7520d9b3d41c-images\") pod \"machine-config-operator-74547568cd-nqccb\" (UID: \"bf2f529a-ec6c-417d-bee8-7520d9b3d41c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319292 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319312 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/104d79b7-c0c6-4cde-a7c2-d60a06a38647-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-htfcv\" (UID: \"104d79b7-c0c6-4cde-a7c2-d60a06a38647\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319567 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e68f3762-2025-4800-98ff-ca440c176b45-signing-key\") pod \"service-ca-9c57cc56f-gzpt9\" (UID: \"e68f3762-2025-4800-98ff-ca440c176b45\") " pod="openshift-service-ca/service-ca-9c57cc56f-gzpt9" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319736 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7be50072-4d5b-4ef3-a534-bdce40d627cb-config\") pod 
\"machine-api-operator-5694c8668f-sghkk\" (UID: \"7be50072-4d5b-4ef3-a534-bdce40d627cb\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319794 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kb6z7\" (UniqueName: \"kubernetes.io/projected/7be50072-4d5b-4ef3-a534-bdce40d627cb-kube-api-access-kb6z7\") pod \"machine-api-operator-5694c8668f-sghkk\" (UID: \"7be50072-4d5b-4ef3-a534-bdce40d627cb\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319868 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bf2f529a-ec6c-417d-bee8-7520d9b3d41c-proxy-tls\") pod \"machine-config-operator-74547568cd-nqccb\" (UID: \"bf2f529a-ec6c-417d-bee8-7520d9b3d41c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319899 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319927 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-serving-cert\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319959 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c54305b3-f1c3-44b0-a77a-47f4cd78794c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-c22sx\" (UID: \"c54305b3-f1c3-44b0-a77a-47f4cd78794c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.319986 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b1286791-4cc8-4da3-8450-5bf2e4dc577d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-fc5bx\" (UID: \"b1286791-4cc8-4da3-8450-5bf2e4dc577d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.320191 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.320261 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trhs5\" (UniqueName: \"kubernetes.io/projected/8087e440-2261-4895-a480-c638b7615f67-kube-api-access-trhs5\") pod 
\"machine-approver-56656f9798-l2x22\" (UID: \"8087e440-2261-4895-a480-c638b7615f67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.320417 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bb08394-518f-4f9c-811d-9bcbf765aad0-config\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.320619 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.320655 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/458d18eb-2c58-43d6-889b-b1ce6f367050-webhook-cert\") pod \"packageserver-d55dfcdfc-jtc2j\" (UID: \"458d18eb-2c58-43d6-889b-b1ce6f367050\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.320685 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/11588ab1-689a-4227-a887-a57b945807a2-secret-volume\") pod \"collect-profiles-29492400-v99t4\" (UID: \"11588ab1-689a-4227-a887-a57b945807a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.320739 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa065492-723b-40dd-9259-1a4452804068-serving-cert\") pod \"route-controller-manager-6576b87f9c-48bz5\" (UID: \"aa065492-723b-40dd-9259-1a4452804068\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.320783 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.320818 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/7be50072-4d5b-4ef3-a534-bdce40d627cb-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-sghkk\" (UID: \"7be50072-4d5b-4ef3-a534-bdce40d627cb\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.320844 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-vbqcn\" (UID: \"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187\") " 
pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.320769 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/7be50072-4d5b-4ef3-a534-bdce40d627cb-images\") pod \"machine-api-operator-5694c8668f-sghkk\" (UID: \"7be50072-4d5b-4ef3-a534-bdce40d627cb\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.324071 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.320985 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.324160 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slr7v\" (UniqueName: \"kubernetes.io/projected/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-kube-api-access-slr7v\") pod \"marketplace-operator-79b997595-vbqcn\" (UID: \"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187\") " pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.324188 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4ntr\" (UniqueName: \"kubernetes.io/projected/b1286791-4cc8-4da3-8450-5bf2e4dc577d-kube-api-access-d4ntr\") pod \"package-server-manager-789f6589d5-fc5bx\" (UID: \"b1286791-4cc8-4da3-8450-5bf2e4dc577d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.324219 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-787z9\" (UniqueName: \"kubernetes.io/projected/458d18eb-2c58-43d6-889b-b1ce6f367050-kube-api-access-787z9\") pod \"packageserver-d55dfcdfc-jtc2j\" (UID: \"458d18eb-2c58-43d6-889b-b1ce6f367050\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.324258 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c54305b3-f1c3-44b0-a77a-47f4cd78794c-config\") pod \"authentication-operator-69f744f599-c22sx\" (UID: \"c54305b3-f1c3-44b0-a77a-47f4cd78794c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.324292 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ce69b998-69b8-46f8-b72b-83aa741479da-registration-dir\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " 
pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.324321 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ce69b998-69b8-46f8-b72b-83aa741479da-csi-data-dir\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.324364 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/28a34749-2dfc-4164-a7b9-016f47e098cd-registry-certificates\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.324418 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-encryption-config\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.324467 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-oauth-serving-cert\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.324507 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzzjk\" (UniqueName: \"kubernetes.io/projected/bf2f529a-ec6c-417d-bee8-7520d9b3d41c-kube-api-access-nzzjk\") pod \"machine-config-operator-74547568cd-nqccb\" (UID: \"bf2f529a-ec6c-417d-bee8-7520d9b3d41c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.325817 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/bf2f529a-ec6c-417d-bee8-7520d9b3d41c-images\") pod \"machine-config-operator-74547568cd-nqccb\" (UID: \"bf2f529a-ec6c-417d-bee8-7520d9b3d41c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.326538 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.327268 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3-metrics-tls\") pod \"ingress-operator-5b745b69d9-ljlt5\" (UID: \"25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.327880 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: 
\"kubernetes.io/secret/28a34749-2dfc-4164-a7b9-016f47e098cd-installation-pull-secrets\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.328431 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8bb08394-518f-4f9c-811d-9bcbf765aad0-etcd-client\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.328916 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.329074 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7be50072-4d5b-4ef3-a534-bdce40d627cb-config\") pod \"machine-api-operator-5694c8668f-sghkk\" (UID: \"7be50072-4d5b-4ef3-a534-bdce40d627cb\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.329449 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.330722 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/28a34749-2dfc-4164-a7b9-016f47e098cd-registry-certificates\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.330851 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.330992 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa065492-723b-40dd-9259-1a4452804068-client-ca\") pod \"route-controller-manager-6576b87f9c-48bz5\" (UID: \"aa065492-723b-40dd-9259-1a4452804068\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.330973 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bf2f529a-ec6c-417d-bee8-7520d9b3d41c-proxy-tls\") pod \"machine-config-operator-74547568cd-nqccb\" (UID: \"bf2f529a-ec6c-417d-bee8-7520d9b3d41c\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.331205 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ea550d07-6ac0-477d-a50d-dba8b5a528a1-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-v64tq\" (UID: \"ea550d07-6ac0-477d-a50d-dba8b5a528a1\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.331356 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-oauth-serving-cert\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.331377 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/8bb08394-518f-4f9c-811d-9bcbf765aad0-etcd-ca\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.331432 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/ba1e1437-9755-498f-b07f-40997bdfa64c-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4lgxd\" (UID: \"ba1e1437-9755-498f-b07f-40997bdfa64c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4lgxd" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.331474 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ce69b998-69b8-46f8-b72b-83aa741479da-socket-dir\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.331742 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-trusted-ca-bundle\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.331930 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa065492-723b-40dd-9259-1a4452804068-client-ca\") pod \"route-controller-manager-6576b87f9c-48bz5\" (UID: \"aa065492-723b-40dd-9259-1a4452804068\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.331999 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tr7jb\" (UniqueName: \"kubernetes.io/projected/ce69b998-69b8-46f8-b72b-83aa741479da-kube-api-access-tr7jb\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332153 4793 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332150 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/7be50072-4d5b-4ef3-a534-bdce40d627cb-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-sghkk\" (UID: \"7be50072-4d5b-4ef3-a534-bdce40d627cb\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332207 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9612bf32-849b-40f1-bf8a-cada1f25acf5-srv-cert\") pod \"catalog-operator-68c6474976-d7pdv\" (UID: \"9612bf32-849b-40f1-bf8a-cada1f25acf5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332242 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/8bb08394-518f-4f9c-811d-9bcbf765aad0-etcd-ca\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332280 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-client-ca\") pod \"controller-manager-879f6c89f-zw89c\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332325 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25vqs\" (UniqueName: \"kubernetes.io/projected/df0b32f8-987a-415d-a22e-c396f3bbaa8f-kube-api-access-25vqs\") pod \"dns-default-sn9b2\" (UID: \"df0b32f8-987a-415d-a22e-c396f3bbaa8f\") " pod="openshift-dns/dns-default-sn9b2" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332403 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10cac1f8-9f3e-43a3-9061-ff12b7cecbb2-serving-cert\") pod \"service-ca-operator-777779d784-m6tz2\" (UID: \"10cac1f8-9f3e-43a3-9061-ff12b7cecbb2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332464 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df0b32f8-987a-415d-a22e-c396f3bbaa8f-config-volume\") pod \"dns-default-sn9b2\" (UID: \"df0b32f8-987a-415d-a22e-c396f3bbaa8f\") " pod="openshift-dns/dns-default-sn9b2" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332538 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shsmh\" (UniqueName: 
\"kubernetes.io/projected/eac40cfc-8509-4b68-9962-4c2e602d155f-kube-api-access-shsmh\") pod \"multus-admission-controller-857f4d67dd-gkcks\" (UID: \"eac40cfc-8509-4b68-9962-4c2e602d155f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gkcks" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332641 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/87824188-046e-4f32-b8da-016605488fca-cert\") pod \"ingress-canary-nzg26\" (UID: \"87824188-046e-4f32-b8da-016605488fca\") " pod="openshift-ingress-canary/ingress-canary-nzg26" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332691 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332695 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8b4a330-77aa-4fe5-af50-14f41e5f727e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-rpssm\" (UID: \"f8b4a330-77aa-4fe5-af50-14f41e5f727e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332751 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86xcs\" (UniqueName: \"kubernetes.io/projected/0c277ffe-d148-4407-9d6e-bb81b17724ac-kube-api-access-86xcs\") pod \"dns-operator-744455d44c-89kfv\" (UID: \"0c277ffe-d148-4407-9d6e-bb81b17724ac\") " pod="openshift-dns-operator/dns-operator-744455d44c-89kfv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332808 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d14aca5a-f217-4b87-bbcd-431b01ec7511-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-bnv2v\" (UID: \"d14aca5a-f217-4b87-bbcd-431b01ec7511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332856 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ce69b998-69b8-46f8-b72b-83aa741479da-plugins-dir\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332941 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-config\") pod \"controller-manager-879f6c89f-zw89c\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.332990 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/ab3e57d5-0315-4083-b0d8-80af81ba8ea0-stats-auth\") pod \"router-default-5444994796-9fj82\" (UID: 
\"ab3e57d5-0315-4083-b0d8-80af81ba8ea0\") " pod="openshift-ingress/router-default-5444994796-9fj82" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.333055 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.333130 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-audit-policies\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.333188 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.333245 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa065492-723b-40dd-9259-1a4452804068-config\") pod \"route-controller-manager-6576b87f9c-48bz5\" (UID: \"aa065492-723b-40dd-9259-1a4452804068\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.333270 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea550d07-6ac0-477d-a50d-dba8b5a528a1-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-v64tq\" (UID: \"ea550d07-6ac0-477d-a50d-dba8b5a528a1\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.333292 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6m4b8\" (UniqueName: \"kubernetes.io/projected/a2c79a98-a142-4c13-a989-b2c887f03d46-kube-api-access-6m4b8\") pod \"kube-storage-version-migrator-operator-b67b599dd-s7j86\" (UID: \"a2c79a98-a142-4c13-a989-b2c887f03d46\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.333319 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3-bound-sa-token\") pod \"ingress-operator-5b745b69d9-ljlt5\" (UID: \"25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.333395 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.333427 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/11588ab1-689a-4227-a887-a57b945807a2-config-volume\") pod \"collect-profiles-29492400-v99t4\" (UID: \"11588ab1-689a-4227-a887-a57b945807a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.333454 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ce69b998-69b8-46f8-b72b-83aa741479da-mountpoint-dir\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.333481 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a2c79a98-a142-4c13-a989-b2c887f03d46-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-s7j86\" (UID: \"a2c79a98-a142-4c13-a989-b2c887f03d46\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.334803 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa065492-723b-40dd-9259-1a4452804068-config\") pod \"route-controller-manager-6576b87f9c-48bz5\" (UID: \"aa065492-723b-40dd-9259-1a4452804068\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.335256 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.333536 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgz5l\" (UniqueName: \"kubernetes.io/projected/d14aca5a-f217-4b87-bbcd-431b01ec7511-kube-api-access-sgz5l\") pod \"machine-config-controller-84d6567774-bnv2v\" (UID: \"d14aca5a-f217-4b87-bbcd-431b01ec7511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.335682 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rk2wp\" (UniqueName: \"kubernetes.io/projected/87824188-046e-4f32-b8da-016605488fca-kube-api-access-rk2wp\") pod \"ingress-canary-nzg26\" (UID: \"87824188-046e-4f32-b8da-016605488fca\") " pod="openshift-ingress-canary/ingress-canary-nzg26" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.335739 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8087e440-2261-4895-a480-c638b7615f67-auth-proxy-config\") pod \"machine-approver-56656f9798-l2x22\" (UID: \"8087e440-2261-4895-a480-c638b7615f67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22" Jan 27 
20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.335780 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-service-ca\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.335889 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae-config\") pod \"console-operator-58897d9998-4nvlb\" (UID: \"1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae\") " pod="openshift-console-operator/console-operator-58897d9998-4nvlb"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336347 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-audit-dir\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336383 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30eb86e9-4989-4a95-bd07-07ff6a872298-serving-cert\") pod \"openshift-config-operator-7777fb866f-2zxw9\" (UID: \"30eb86e9-4989-4a95-bd07-07ff6a872298\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336417 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kc2rq\" (UniqueName: \"kubernetes.io/projected/470ff764-d3e6-48a6-aa1b-b4777a1d746f-kube-api-access-kc2rq\") pod \"controller-manager-879f6c89f-zw89c\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336443 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zw89c\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336468 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/8087e440-2261-4895-a480-c638b7615f67-machine-approver-tls\") pod \"machine-approver-56656f9798-l2x22\" (UID: \"8087e440-2261-4895-a480-c638b7615f67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336504 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pkzz\" (UniqueName: \"kubernetes.io/projected/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-kube-api-access-4pkzz\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.335933 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8b4a330-77aa-4fe5-af50-14f41e5f727e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-rpssm\" (UID: \"f8b4a330-77aa-4fe5-af50-14f41e5f727e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336678 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6ffbe35-d1b5-48b0-8481-d788b0801196-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7hp8w\" (UID: \"e6ffbe35-d1b5-48b0-8481-d788b0801196\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336709 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/eac40cfc-8509-4b68-9962-4c2e602d155f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-gkcks\" (UID: \"eac40cfc-8509-4b68-9962-4c2e602d155f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gkcks"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336734 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgkjl\" (UniqueName: \"kubernetes.io/projected/11588ab1-689a-4227-a887-a57b945807a2-kube-api-access-kgkjl\") pod \"collect-profiles-29492400-v99t4\" (UID: \"11588ab1-689a-4227-a887-a57b945807a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336760 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8087e440-2261-4895-a480-c638b7615f67-config\") pod \"machine-approver-56656f9798-l2x22\" (UID: \"8087e440-2261-4895-a480-c638b7615f67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336782 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzct8\" (UniqueName: \"kubernetes.io/projected/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-kube-api-access-pzct8\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336811 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjhn8\" (UniqueName: \"kubernetes.io/projected/dbcb231b-60c1-4027-b85d-bbdb1c193304-kube-api-access-gjhn8\") pod \"openshift-controller-manager-operator-756b6f6bc6-m6whz\" (UID: \"dbcb231b-60c1-4027-b85d-bbdb1c193304\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336836 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-config\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336862 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/df0b32f8-987a-415d-a22e-c396f3bbaa8f-metrics-tls\") pod \"dns-default-sn9b2\" (UID: \"df0b32f8-987a-415d-a22e-c396f3bbaa8f\") " pod="openshift-dns/dns-default-sn9b2"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336883 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/c2f53dac-143e-4ab4-b76d-cc7bf64f8df4-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-shv5g\" (UID: \"c2f53dac-143e-4ab4-b76d-cc7bf64f8df4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336910 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c2f53dac-143e-4ab4-b76d-cc7bf64f8df4-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-shv5g\" (UID: \"c2f53dac-143e-4ab4-b76d-cc7bf64f8df4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336954 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.336981 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ab3e57d5-0315-4083-b0d8-80af81ba8ea0-service-ca-bundle\") pod \"router-default-5444994796-9fj82\" (UID: \"ab3e57d5-0315-4083-b0d8-80af81ba8ea0\") " pod="openshift-ingress/router-default-5444994796-9fj82"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.337214 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/28a34749-2dfc-4164-a7b9-016f47e098cd-trusted-ca\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.337262 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-bound-sa-token\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.337294 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e68f3762-2025-4800-98ff-ca440c176b45-signing-cabundle\") pod \"service-ca-9c57cc56f-gzpt9\" (UID: \"e68f3762-2025-4800-98ff-ca440c176b45\") " pod="openshift-service-ca/service-ca-9c57cc56f-gzpt9"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.337319 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbcb231b-60c1-4027-b85d-bbdb1c193304-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-m6whz\" (UID: \"dbcb231b-60c1-4027-b85d-bbdb1c193304\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.337346 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7nmx\" (UniqueName: \"kubernetes.io/projected/ba1e1437-9755-498f-b07f-40997bdfa64c-kube-api-access-x7nmx\") pod \"control-plane-machine-set-operator-78cbb6b69f-4lgxd\" (UID: \"ba1e1437-9755-498f-b07f-40997bdfa64c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4lgxd"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.337365 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwtkp\" (UniqueName: \"kubernetes.io/projected/e68f3762-2025-4800-98ff-ca440c176b45-kube-api-access-qwtkp\") pod \"service-ca-9c57cc56f-gzpt9\" (UID: \"e68f3762-2025-4800-98ff-ca440c176b45\") " pod="openshift-service-ca/service-ca-9c57cc56f-gzpt9"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.337387 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-serving-cert\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.337420 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wcgms\" (UniqueName: \"kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-kube-api-access-wcgms\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.337442 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8bb08394-518f-4f9c-811d-9bcbf765aad0-serving-cert\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.337461 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c54305b3-f1c3-44b0-a77a-47f4cd78794c-serving-cert\") pod \"authentication-operator-69f744f599-c22sx\" (UID: \"c54305b3-f1c3-44b0-a77a-47f4cd78794c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.337485 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d14aca5a-f217-4b87-bbcd-431b01ec7511-proxy-tls\") pod \"machine-config-controller-84d6567774-bnv2v\" (UID: \"d14aca5a-f217-4b87-bbcd-431b01ec7511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.337508 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6pbf\" (UniqueName: \"kubernetes.io/projected/ab3e57d5-0315-4083-b0d8-80af81ba8ea0-kube-api-access-q6pbf\") pod \"router-default-5444994796-9fj82\" (UID: \"ab3e57d5-0315-4083-b0d8-80af81ba8ea0\") " pod="openshift-ingress/router-default-5444994796-9fj82"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.337562 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/28a34749-2dfc-4164-a7b9-016f47e098cd-ca-trust-extracted\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.337589 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kns7n\" (UniqueName: \"kubernetes.io/projected/30eb86e9-4989-4a95-bd07-07ff6a872298-kube-api-access-kns7n\") pod \"openshift-config-operator-7777fb866f-2zxw9\" (UID: \"30eb86e9-4989-4a95-bd07-07ff6a872298\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.337861 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-audit-policies\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.338068 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-oauth-config\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.338434 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8087e440-2261-4895-a480-c638b7615f67-auth-proxy-config\") pod \"machine-approver-56656f9798-l2x22\" (UID: \"8087e440-2261-4895-a480-c638b7615f67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.338508 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-service-ca\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq"
Jan 27 20:05:17 crc kubenswrapper[4793]: E0127 20:05:17.338874 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:17.83885884 +0000 UTC m=+143.229111996 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.338935 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8087e440-2261-4895-a480-c638b7615f67-config\") pod \"machine-approver-56656f9798-l2x22\" (UID: \"8087e440-2261-4895-a480-c638b7615f67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.339095 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae-config\") pod \"console-operator-58897d9998-4nvlb\" (UID: \"1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae\") " pod="openshift-console-operator/console-operator-58897d9998-4nvlb"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.339165 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-audit-dir\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.339582 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/28a34749-2dfc-4164-a7b9-016f47e098cd-ca-trust-extracted\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.339794 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.339898 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea550d07-6ac0-477d-a50d-dba8b5a528a1-config\") pod \"kube-controller-manager-operator-78b949d7b-v64tq\" (UID: \"ea550d07-6ac0-477d-a50d-dba8b5a528a1\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.339917 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-config\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq"
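
The E0127 record above comes from nestedpendingoperations.go, which serializes operations per volume and, after a failure, refuses to retry until a backoff window expires; the first window is printed in the record itself ("durationBeforeRetry 500ms"). The sketch below shows the kind of exponentially growing retry schedule this implies; the initial 500ms matches the log, while the doubling factor and the cap are assumptions for illustration, not values read out of the kubelet.

package main

import (
	"fmt"
	"time"
)

func main() {
	// First retry delay taken from the log ("durationBeforeRetry 500ms").
	delay := 500 * time.Millisecond
	// Assumed: the delay doubles per failure up to some cap.
	maxDelay := 2 * time.Minute
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d failed: no retries permitted for %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
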
\"openshift-controller-manager-operator-756b6f6bc6-m6whz\" (UID: \"dbcb231b-60c1-4027-b85d-bbdb1c193304\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.339983 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptzm5\" (UniqueName: \"kubernetes.io/projected/c2f53dac-143e-4ab4-b76d-cc7bf64f8df4-kube-api-access-ptzm5\") pod \"cluster-image-registry-operator-dc59b4c8b-shv5g\" (UID: \"c2f53dac-143e-4ab4-b76d-cc7bf64f8df4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.340009 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-299sb\" (UniqueName: \"kubernetes.io/projected/9612bf32-849b-40f1-bf8a-cada1f25acf5-kube-api-access-299sb\") pod \"catalog-operator-68c6474976-d7pdv\" (UID: \"9612bf32-849b-40f1-bf8a-cada1f25acf5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.340612 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea550d07-6ac0-477d-a50d-dba8b5a528a1-config\") pod \"kube-controller-manager-operator-78b949d7b-v64tq\" (UID: \"ea550d07-6ac0-477d-a50d-dba8b5a528a1\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.340792 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbcb231b-60c1-4027-b85d-bbdb1c193304-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-m6whz\" (UID: \"dbcb231b-60c1-4027-b85d-bbdb1c193304\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.341205 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6ffbe35-d1b5-48b0-8481-d788b0801196-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7hp8w\" (UID: \"e6ffbe35-d1b5-48b0-8481-d788b0801196\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.341258 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.341300 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/8bb08394-518f-4f9c-811d-9bcbf765aad0-etcd-service-ca\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.341453 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/9612bf32-849b-40f1-bf8a-cada1f25acf5-profile-collector-cert\") pod \"catalog-operator-68c6474976-d7pdv\" (UID: \"9612bf32-849b-40f1-bf8a-cada1f25acf5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.341494 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3-trusted-ca\") pod \"ingress-operator-5b745b69d9-ljlt5\" (UID: \"25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.341523 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/74c3798b-d26e-4be2-817f-a9004fd819c1-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-swb5p\" (UID: \"74c3798b-d26e-4be2-817f-a9004fd819c1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-swb5p" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.341566 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e6ffbe35-d1b5-48b0-8481-d788b0801196-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7hp8w\" (UID: \"e6ffbe35-d1b5-48b0-8481-d788b0801196\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.341715 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/104d79b7-c0c6-4cde-a7c2-d60a06a38647-config\") pod \"kube-apiserver-operator-766d6c64bb-htfcv\" (UID: \"104d79b7-c0c6-4cde-a7c2-d60a06a38647\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.341824 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9xtb\" (UniqueName: \"kubernetes.io/projected/f8b4a330-77aa-4fe5-af50-14f41e5f727e-kube-api-access-l9xtb\") pod \"openshift-apiserver-operator-796bbdcf4f-rpssm\" (UID: \"f8b4a330-77aa-4fe5-af50-14f41e5f727e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.342439 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/8bb08394-518f-4f9c-811d-9bcbf765aad0-etcd-service-ca\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.343078 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/28a34749-2dfc-4164-a7b9-016f47e098cd-trusted-ca\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.343383 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa065492-723b-40dd-9259-1a4452804068-serving-cert\") pod 
\"route-controller-manager-6576b87f9c-48bz5\" (UID: \"aa065492-723b-40dd-9259-1a4452804068\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.344211 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3-trusted-ca\") pod \"ingress-operator-5b745b69d9-ljlt5\" (UID: \"25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.344380 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30eb86e9-4989-4a95-bd07-07ff6a872298-serving-cert\") pod \"openshift-config-operator-7777fb866f-2zxw9\" (UID: \"30eb86e9-4989-4a95-bd07-07ff6a872298\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.345712 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c54305b3-f1c3-44b0-a77a-47f4cd78794c-service-ca-bundle\") pod \"authentication-operator-69f744f599-c22sx\" (UID: \"c54305b3-f1c3-44b0-a77a-47f4cd78794c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.345845 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0c277ffe-d148-4407-9d6e-bb81b17724ac-metrics-tls\") pod \"dns-operator-744455d44c-89kfv\" (UID: \"0c277ffe-d148-4407-9d6e-bb81b17724ac\") " pod="openshift-dns-operator/dns-operator-744455d44c-89kfv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.345918 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae-serving-cert\") pod \"console-operator-58897d9998-4nvlb\" (UID: \"1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae\") " pod="openshift-console-operator/console-operator-58897d9998-4nvlb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.345950 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-etcd-client\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.345978 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/470ff764-d3e6-48a6-aa1b-b4777a1d746f-serving-cert\") pod \"controller-manager-879f6c89f-zw89c\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.346011 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdhv4\" (UniqueName: \"kubernetes.io/projected/10cac1f8-9f3e-43a3-9061-ff12b7cecbb2-kube-api-access-zdhv4\") pod \"service-ca-operator-777779d784-m6tz2\" (UID: \"10cac1f8-9f3e-43a3-9061-ff12b7cecbb2\") " 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.346061 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jzk2\" (UniqueName: \"kubernetes.io/projected/1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae-kube-api-access-6jzk2\") pod \"console-operator-58897d9998-4nvlb\" (UID: \"1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae\") " pod="openshift-console-operator/console-operator-58897d9998-4nvlb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.346110 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f8b4a330-77aa-4fe5-af50-14f41e5f727e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-rpssm\" (UID: \"f8b4a330-77aa-4fe5-af50-14f41e5f727e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.346146 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/30eb86e9-4989-4a95-bd07-07ff6a872298-available-featuregates\") pod \"openshift-config-operator-7777fb866f-2zxw9\" (UID: \"30eb86e9-4989-4a95-bd07-07ff6a872298\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.346180 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/ab3e57d5-0315-4083-b0d8-80af81ba8ea0-default-certificate\") pod \"router-default-5444994796-9fj82\" (UID: \"ab3e57d5-0315-4083-b0d8-80af81ba8ea0\") " pod="openshift-ingress/router-default-5444994796-9fj82" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.346217 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrx42\" (UniqueName: \"kubernetes.io/projected/c26f1fef-b150-4021-8cfc-c08128248f8a-kube-api-access-mrx42\") pod \"migrator-59844c95c7-2qgvl\" (UID: \"c26f1fef-b150-4021-8cfc-c08128248f8a\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2qgvl" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.346298 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bf2f529a-ec6c-417d-bee8-7520d9b3d41c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-nqccb\" (UID: \"bf2f529a-ec6c-417d-bee8-7520d9b3d41c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.346346 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9c2l\" (UniqueName: \"kubernetes.io/projected/a043f31e-8e0a-41eb-a2ad-73f6d5795b0a-kube-api-access-p9c2l\") pod \"downloads-7954f5f757-mq8nr\" (UID: \"a043f31e-8e0a-41eb-a2ad-73f6d5795b0a\") " pod="openshift-console/downloads-7954f5f757-mq8nr" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.346373 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2c79a98-a142-4c13-a989-b2c887f03d46-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-s7j86\" (UID: \"a2c79a98-a142-4c13-a989-b2c887f03d46\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.346439 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wth5c\" (UniqueName: \"kubernetes.io/projected/8bb08394-518f-4f9c-811d-9bcbf765aad0-kube-api-access-wth5c\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.346522 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzfd2\" (UniqueName: \"kubernetes.io/projected/c54305b3-f1c3-44b0-a77a-47f4cd78794c-kube-api-access-rzfd2\") pod \"authentication-operator-69f744f599-c22sx\" (UID: \"c54305b3-f1c3-44b0-a77a-47f4cd78794c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.346581 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.346652 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/41a53f60-6551-47cf-a063-02a42f9983e9-audit-dir\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.346683 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.346715 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/02367c13-c0f2-4600-9baa-1a55f0f50e8b-srv-cert\") pod \"olm-operator-6b444d44fb-vlz9q\" (UID: \"02367c13-c0f2-4600-9baa-1a55f0f50e8b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.346917 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae-trusted-ca\") pod \"console-operator-58897d9998-4nvlb\" (UID: \"1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae\") " pod="openshift-console-operator/console-operator-58897d9998-4nvlb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.347762 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.349024 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bf2f529a-ec6c-417d-bee8-7520d9b3d41c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-nqccb\" (UID: \"bf2f529a-ec6c-417d-bee8-7520d9b3d41c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.349314 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/30eb86e9-4989-4a95-bd07-07ff6a872298-available-featuregates\") pod \"openshift-config-operator-7777fb866f-2zxw9\" (UID: \"30eb86e9-4989-4a95-bd07-07ff6a872298\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.349502 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/74c3798b-d26e-4be2-817f-a9004fd819c1-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-swb5p\" (UID: \"74c3798b-d26e-4be2-817f-a9004fd819c1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-swb5p" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.349833 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-etcd-client\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.349897 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/8087e440-2261-4895-a480-c638b7615f67-machine-approver-tls\") pod \"machine-approver-56656f9798-l2x22\" (UID: \"8087e440-2261-4895-a480-c638b7615f67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.350079 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.350331 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/41a53f60-6551-47cf-a063-02a42f9983e9-audit-dir\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.350617 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea550d07-6ac0-477d-a50d-dba8b5a528a1-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-v64tq\" (UID: \"ea550d07-6ac0-477d-a50d-dba8b5a528a1\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.351103 4793 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-registry-tls\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.351447 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae-trusted-ca\") pod \"console-operator-58897d9998-4nvlb\" (UID: \"1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae\") " pod="openshift-console-operator/console-operator-58897d9998-4nvlb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.351495 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-encryption-config\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.352844 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-serving-cert\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.352867 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8bb08394-518f-4f9c-811d-9bcbf765aad0-serving-cert\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.352908 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbcb231b-60c1-4027-b85d-bbdb1c193304-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-m6whz\" (UID: \"dbcb231b-60c1-4027-b85d-bbdb1c193304\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.353420 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f8b4a330-77aa-4fe5-af50-14f41e5f727e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-rpssm\" (UID: \"f8b4a330-77aa-4fe5-af50-14f41e5f727e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.354265 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae-serving-cert\") pod \"console-operator-58897d9998-4nvlb\" (UID: \"1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae\") " pod="openshift-console-operator/console-operator-58897d9998-4nvlb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.366985 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: 
\"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.367233 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.367540 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-serving-cert\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.385102 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fp2nj\" (UniqueName: \"kubernetes.io/projected/25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3-kube-api-access-fp2nj\") pod \"ingress-operator-5b745b69d9-ljlt5\" (UID: \"25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.389140 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sz5fl\" (UniqueName: \"kubernetes.io/projected/41a53f60-6551-47cf-a063-02a42f9983e9-kube-api-access-sz5fl\") pod \"oauth-openshift-558db77b4-jtmmw\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") " pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.402796 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kb6z7\" (UniqueName: \"kubernetes.io/projected/7be50072-4d5b-4ef3-a534-bdce40d627cb-kube-api-access-kb6z7\") pod \"machine-api-operator-5694c8668f-sghkk\" (UID: \"7be50072-4d5b-4ef3-a534-bdce40d627cb\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.418179 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ff5f2\" (UniqueName: \"kubernetes.io/projected/74c3798b-d26e-4be2-817f-a9004fd819c1-kube-api-access-ff5f2\") pod \"cluster-samples-operator-665b6dd947-swb5p\" (UID: \"74c3798b-d26e-4be2-817f-a9004fd819c1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-swb5p" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.435338 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.444923 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trhs5\" (UniqueName: \"kubernetes.io/projected/8087e440-2261-4895-a480-c638b7615f67-kube-api-access-trhs5\") pod \"machine-approver-56656f9798-l2x22\" (UID: \"8087e440-2261-4895-a480-c638b7615f67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448062 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448285 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9c2l\" (UniqueName: \"kubernetes.io/projected/a043f31e-8e0a-41eb-a2ad-73f6d5795b0a-kube-api-access-p9c2l\") pod \"downloads-7954f5f757-mq8nr\" (UID: \"a043f31e-8e0a-41eb-a2ad-73f6d5795b0a\") " pod="openshift-console/downloads-7954f5f757-mq8nr" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448315 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzfd2\" (UniqueName: \"kubernetes.io/projected/c54305b3-f1c3-44b0-a77a-47f4cd78794c-kube-api-access-rzfd2\") pod \"authentication-operator-69f744f599-c22sx\" (UID: \"c54305b3-f1c3-44b0-a77a-47f4cd78794c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448334 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2c79a98-a142-4c13-a989-b2c887f03d46-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-s7j86\" (UID: \"a2c79a98-a142-4c13-a989-b2c887f03d46\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448357 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/02367c13-c0f2-4600-9baa-1a55f0f50e8b-srv-cert\") pod \"olm-operator-6b444d44fb-vlz9q\" (UID: \"02367c13-c0f2-4600-9baa-1a55f0f50e8b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448383 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/30a645a0-5c78-4576-ba66-56e95fb6b07a-node-bootstrap-token\") pod \"machine-config-server-6kf4l\" (UID: \"30a645a0-5c78-4576-ba66-56e95fb6b07a\") " pod="openshift-machine-config-operator/machine-config-server-6kf4l" Jan 27 20:05:17 crc kubenswrapper[4793]: E0127 20:05:17.448414 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:17.948387914 +0000 UTC m=+143.338641070 (durationBeforeRetry 500ms). 
Jan 27 20:05:17 crc kubenswrapper[4793]: E0127 20:05:17.448414 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:17.948387914 +0000 UTC m=+143.338641070 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448479 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/458d18eb-2c58-43d6-889b-b1ce6f367050-apiservice-cert\") pod \"packageserver-d55dfcdfc-jtc2j\" (UID: \"458d18eb-2c58-43d6-889b-b1ce6f367050\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448519 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lsw8l\" (UniqueName: \"kubernetes.io/projected/30a645a0-5c78-4576-ba66-56e95fb6b07a-kube-api-access-lsw8l\") pod \"machine-config-server-6kf4l\" (UID: \"30a645a0-5c78-4576-ba66-56e95fb6b07a\") " pod="openshift-machine-config-operator/machine-config-server-6kf4l"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448606 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/02367c13-c0f2-4600-9baa-1a55f0f50e8b-profile-collector-cert\") pod \"olm-operator-6b444d44fb-vlz9q\" (UID: \"02367c13-c0f2-4600-9baa-1a55f0f50e8b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448685 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/30a645a0-5c78-4576-ba66-56e95fb6b07a-certs\") pod \"machine-config-server-6kf4l\" (UID: \"30a645a0-5c78-4576-ba66-56e95fb6b07a\") " pod="openshift-machine-config-operator/machine-config-server-6kf4l"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448704 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hb9c4\" (UniqueName: \"kubernetes.io/projected/02367c13-c0f2-4600-9baa-1a55f0f50e8b-kube-api-access-hb9c4\") pod \"olm-operator-6b444d44fb-vlz9q\" (UID: \"02367c13-c0f2-4600-9baa-1a55f0f50e8b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448719 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10cac1f8-9f3e-43a3-9061-ff12b7cecbb2-config\") pod \"service-ca-operator-777779d784-m6tz2\" (UID: \"10cac1f8-9f3e-43a3-9061-ff12b7cecbb2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2"
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448763 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-vbqcn\" (UID: \"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187\") " pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn"
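
The UnmountVolume.TearDown failure above has the same root cause as the earlier MountVolume.MountDevice failure: kubevirt.io.hostpath-provisioner has not yet registered itself with the kubelet, which only happens once the csi-hostpathplugin pod (whose registration-dir, socket-dir, and plugins-dir mounts appear below) is running. Registered drivers are published on the node's CSINode object; the client-go sketch below lists them for the node "crc" seen in this log, assuming a kubeconfig at the default location.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: a reachable kubeconfig at ~/.kube/config.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// "crc" is the node name that appears in this log.
	csiNode, err := clientset.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, d := range csiNode.Spec.Drivers {
		// kubevirt.io.hostpath-provisioner should appear here once the
		// csi-hostpathplugin pod has registered itself with the kubelet.
		fmt.Println(d.Name)
	}
}
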
(UniqueName: \"kubernetes.io/configmap/c2f53dac-143e-4ab4-b76d-cc7bf64f8df4-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-shv5g\" (UID: \"c2f53dac-143e-4ab4-b76d-cc7bf64f8df4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448799 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/104d79b7-c0c6-4cde-a7c2-d60a06a38647-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-htfcv\" (UID: \"104d79b7-c0c6-4cde-a7c2-d60a06a38647\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448820 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/458d18eb-2c58-43d6-889b-b1ce6f367050-tmpfs\") pod \"packageserver-d55dfcdfc-jtc2j\" (UID: \"458d18eb-2c58-43d6-889b-b1ce6f367050\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448836 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ab3e57d5-0315-4083-b0d8-80af81ba8ea0-metrics-certs\") pod \"router-default-5444994796-9fj82\" (UID: \"ab3e57d5-0315-4083-b0d8-80af81ba8ea0\") " pod="openshift-ingress/router-default-5444994796-9fj82" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448858 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/104d79b7-c0c6-4cde-a7c2-d60a06a38647-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-htfcv\" (UID: \"104d79b7-c0c6-4cde-a7c2-d60a06a38647\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448889 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e68f3762-2025-4800-98ff-ca440c176b45-signing-key\") pod \"service-ca-9c57cc56f-gzpt9\" (UID: \"e68f3762-2025-4800-98ff-ca440c176b45\") " pod="openshift-service-ca/service-ca-9c57cc56f-gzpt9" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448918 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c54305b3-f1c3-44b0-a77a-47f4cd78794c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-c22sx\" (UID: \"c54305b3-f1c3-44b0-a77a-47f4cd78794c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448943 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b1286791-4cc8-4da3-8450-5bf2e4dc577d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-fc5bx\" (UID: \"b1286791-4cc8-4da3-8450-5bf2e4dc577d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448964 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/458d18eb-2c58-43d6-889b-b1ce6f367050-webhook-cert\") pod \"packageserver-d55dfcdfc-jtc2j\" (UID: 
\"458d18eb-2c58-43d6-889b-b1ce6f367050\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.448985 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/11588ab1-689a-4227-a887-a57b945807a2-secret-volume\") pod \"collect-profiles-29492400-v99t4\" (UID: \"11588ab1-689a-4227-a887-a57b945807a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449017 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-vbqcn\" (UID: \"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187\") " pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449041 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4ntr\" (UniqueName: \"kubernetes.io/projected/b1286791-4cc8-4da3-8450-5bf2e4dc577d-kube-api-access-d4ntr\") pod \"package-server-manager-789f6589d5-fc5bx\" (UID: \"b1286791-4cc8-4da3-8450-5bf2e4dc577d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449059 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-787z9\" (UniqueName: \"kubernetes.io/projected/458d18eb-2c58-43d6-889b-b1ce6f367050-kube-api-access-787z9\") pod \"packageserver-d55dfcdfc-jtc2j\" (UID: \"458d18eb-2c58-43d6-889b-b1ce6f367050\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449082 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slr7v\" (UniqueName: \"kubernetes.io/projected/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-kube-api-access-slr7v\") pod \"marketplace-operator-79b997595-vbqcn\" (UID: \"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187\") " pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449103 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c54305b3-f1c3-44b0-a77a-47f4cd78794c-config\") pod \"authentication-operator-69f744f599-c22sx\" (UID: \"c54305b3-f1c3-44b0-a77a-47f4cd78794c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449124 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ce69b998-69b8-46f8-b72b-83aa741479da-registration-dir\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449144 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ce69b998-69b8-46f8-b72b-83aa741479da-csi-data-dir\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449208 4793 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/ba1e1437-9755-498f-b07f-40997bdfa64c-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4lgxd\" (UID: \"ba1e1437-9755-498f-b07f-40997bdfa64c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4lgxd" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449268 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ce69b998-69b8-46f8-b72b-83aa741479da-socket-dir\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449287 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tr7jb\" (UniqueName: \"kubernetes.io/projected/ce69b998-69b8-46f8-b72b-83aa741479da-kube-api-access-tr7jb\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449309 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9612bf32-849b-40f1-bf8a-cada1f25acf5-srv-cert\") pod \"catalog-operator-68c6474976-d7pdv\" (UID: \"9612bf32-849b-40f1-bf8a-cada1f25acf5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449330 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25vqs\" (UniqueName: \"kubernetes.io/projected/df0b32f8-987a-415d-a22e-c396f3bbaa8f-kube-api-access-25vqs\") pod \"dns-default-sn9b2\" (UID: \"df0b32f8-987a-415d-a22e-c396f3bbaa8f\") " pod="openshift-dns/dns-default-sn9b2" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449394 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10cac1f8-9f3e-43a3-9061-ff12b7cecbb2-serving-cert\") pod \"service-ca-operator-777779d784-m6tz2\" (UID: \"10cac1f8-9f3e-43a3-9061-ff12b7cecbb2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449426 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-client-ca\") pod \"controller-manager-879f6c89f-zw89c\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449451 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86xcs\" (UniqueName: \"kubernetes.io/projected/0c277ffe-d148-4407-9d6e-bb81b17724ac-kube-api-access-86xcs\") pod \"dns-operator-744455d44c-89kfv\" (UID: \"0c277ffe-d148-4407-9d6e-bb81b17724ac\") " pod="openshift-dns-operator/dns-operator-744455d44c-89kfv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449477 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d14aca5a-f217-4b87-bbcd-431b01ec7511-mcc-auth-proxy-config\") pod 
\"machine-config-controller-84d6567774-bnv2v\" (UID: \"d14aca5a-f217-4b87-bbcd-431b01ec7511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449518 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df0b32f8-987a-415d-a22e-c396f3bbaa8f-config-volume\") pod \"dns-default-sn9b2\" (UID: \"df0b32f8-987a-415d-a22e-c396f3bbaa8f\") " pod="openshift-dns/dns-default-sn9b2" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449560 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shsmh\" (UniqueName: \"kubernetes.io/projected/eac40cfc-8509-4b68-9962-4c2e602d155f-kube-api-access-shsmh\") pod \"multus-admission-controller-857f4d67dd-gkcks\" (UID: \"eac40cfc-8509-4b68-9962-4c2e602d155f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gkcks" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449559 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ce69b998-69b8-46f8-b72b-83aa741479da-csi-data-dir\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449608 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/87824188-046e-4f32-b8da-016605488fca-cert\") pod \"ingress-canary-nzg26\" (UID: \"87824188-046e-4f32-b8da-016605488fca\") " pod="openshift-ingress-canary/ingress-canary-nzg26" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449637 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ce69b998-69b8-46f8-b72b-83aa741479da-plugins-dir\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449666 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-config\") pod \"controller-manager-879f6c89f-zw89c\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449689 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/ab3e57d5-0315-4083-b0d8-80af81ba8ea0-stats-auth\") pod \"router-default-5444994796-9fj82\" (UID: \"ab3e57d5-0315-4083-b0d8-80af81ba8ea0\") " pod="openshift-ingress/router-default-5444994796-9fj82" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449714 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6m4b8\" (UniqueName: \"kubernetes.io/projected/a2c79a98-a142-4c13-a989-b2c887f03d46-kube-api-access-6m4b8\") pod \"kube-storage-version-migrator-operator-b67b599dd-s7j86\" (UID: \"a2c79a98-a142-4c13-a989-b2c887f03d46\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449744 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/11588ab1-689a-4227-a887-a57b945807a2-config-volume\") pod \"collect-profiles-29492400-v99t4\" (UID: \"11588ab1-689a-4227-a887-a57b945807a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449795 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ce69b998-69b8-46f8-b72b-83aa741479da-mountpoint-dir\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449823 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a2c79a98-a142-4c13-a989-b2c887f03d46-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-s7j86\" (UID: \"a2c79a98-a142-4c13-a989-b2c887f03d46\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449865 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgz5l\" (UniqueName: \"kubernetes.io/projected/d14aca5a-f217-4b87-bbcd-431b01ec7511-kube-api-access-sgz5l\") pod \"machine-config-controller-84d6567774-bnv2v\" (UID: \"d14aca5a-f217-4b87-bbcd-431b01ec7511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449888 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rk2wp\" (UniqueName: \"kubernetes.io/projected/87824188-046e-4f32-b8da-016605488fca-kube-api-access-rk2wp\") pod \"ingress-canary-nzg26\" (UID: \"87824188-046e-4f32-b8da-016605488fca\") " pod="openshift-ingress-canary/ingress-canary-nzg26" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449914 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kc2rq\" (UniqueName: \"kubernetes.io/projected/470ff764-d3e6-48a6-aa1b-b4777a1d746f-kube-api-access-kc2rq\") pod \"controller-manager-879f6c89f-zw89c\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449947 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zw89c\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449970 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6ffbe35-d1b5-48b0-8481-d788b0801196-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7hp8w\" (UID: \"e6ffbe35-d1b5-48b0-8481-d788b0801196\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.449992 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/eac40cfc-8509-4b68-9962-4c2e602d155f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-gkcks\" (UID: \"eac40cfc-8509-4b68-9962-4c2e602d155f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gkcks" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450013 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgkjl\" (UniqueName: \"kubernetes.io/projected/11588ab1-689a-4227-a887-a57b945807a2-kube-api-access-kgkjl\") pod \"collect-profiles-29492400-v99t4\" (UID: \"11588ab1-689a-4227-a887-a57b945807a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450057 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450080 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/df0b32f8-987a-415d-a22e-c396f3bbaa8f-metrics-tls\") pod \"dns-default-sn9b2\" (UID: \"df0b32f8-987a-415d-a22e-c396f3bbaa8f\") " pod="openshift-dns/dns-default-sn9b2" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450120 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/c2f53dac-143e-4ab4-b76d-cc7bf64f8df4-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-shv5g\" (UID: \"c2f53dac-143e-4ab4-b76d-cc7bf64f8df4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450141 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c2f53dac-143e-4ab4-b76d-cc7bf64f8df4-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-shv5g\" (UID: \"c2f53dac-143e-4ab4-b76d-cc7bf64f8df4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450162 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ab3e57d5-0315-4083-b0d8-80af81ba8ea0-service-ca-bundle\") pod \"router-default-5444994796-9fj82\" (UID: \"ab3e57d5-0315-4083-b0d8-80af81ba8ea0\") " pod="openshift-ingress/router-default-5444994796-9fj82" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450195 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e68f3762-2025-4800-98ff-ca440c176b45-signing-cabundle\") pod \"service-ca-9c57cc56f-gzpt9\" (UID: \"e68f3762-2025-4800-98ff-ca440c176b45\") " pod="openshift-service-ca/service-ca-9c57cc56f-gzpt9" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450225 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7nmx\" (UniqueName: \"kubernetes.io/projected/ba1e1437-9755-498f-b07f-40997bdfa64c-kube-api-access-x7nmx\") pod \"control-plane-machine-set-operator-78cbb6b69f-4lgxd\" (UID: 
\"ba1e1437-9755-498f-b07f-40997bdfa64c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4lgxd" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450249 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwtkp\" (UniqueName: \"kubernetes.io/projected/e68f3762-2025-4800-98ff-ca440c176b45-kube-api-access-qwtkp\") pod \"service-ca-9c57cc56f-gzpt9\" (UID: \"e68f3762-2025-4800-98ff-ca440c176b45\") " pod="openshift-service-ca/service-ca-9c57cc56f-gzpt9" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450298 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c54305b3-f1c3-44b0-a77a-47f4cd78794c-serving-cert\") pod \"authentication-operator-69f744f599-c22sx\" (UID: \"c54305b3-f1c3-44b0-a77a-47f4cd78794c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450320 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d14aca5a-f217-4b87-bbcd-431b01ec7511-proxy-tls\") pod \"machine-config-controller-84d6567774-bnv2v\" (UID: \"d14aca5a-f217-4b87-bbcd-431b01ec7511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450332 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c54305b3-f1c3-44b0-a77a-47f4cd78794c-config\") pod \"authentication-operator-69f744f599-c22sx\" (UID: \"c54305b3-f1c3-44b0-a77a-47f4cd78794c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450342 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6pbf\" (UniqueName: \"kubernetes.io/projected/ab3e57d5-0315-4083-b0d8-80af81ba8ea0-kube-api-access-q6pbf\") pod \"router-default-5444994796-9fj82\" (UID: \"ab3e57d5-0315-4083-b0d8-80af81ba8ea0\") " pod="openshift-ingress/router-default-5444994796-9fj82" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450392 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6ffbe35-d1b5-48b0-8481-d788b0801196-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7hp8w\" (UID: \"e6ffbe35-d1b5-48b0-8481-d788b0801196\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450417 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptzm5\" (UniqueName: \"kubernetes.io/projected/c2f53dac-143e-4ab4-b76d-cc7bf64f8df4-kube-api-access-ptzm5\") pod \"cluster-image-registry-operator-dc59b4c8b-shv5g\" (UID: \"c2f53dac-143e-4ab4-b76d-cc7bf64f8df4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450439 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-299sb\" (UniqueName: \"kubernetes.io/projected/9612bf32-849b-40f1-bf8a-cada1f25acf5-kube-api-access-299sb\") pod \"catalog-operator-68c6474976-d7pdv\" (UID: \"9612bf32-849b-40f1-bf8a-cada1f25acf5\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450462 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9612bf32-849b-40f1-bf8a-cada1f25acf5-profile-collector-cert\") pod \"catalog-operator-68c6474976-d7pdv\" (UID: \"9612bf32-849b-40f1-bf8a-cada1f25acf5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450495 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c54305b3-f1c3-44b0-a77a-47f4cd78794c-service-ca-bundle\") pod \"authentication-operator-69f744f599-c22sx\" (UID: \"c54305b3-f1c3-44b0-a77a-47f4cd78794c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450516 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e6ffbe35-d1b5-48b0-8481-d788b0801196-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7hp8w\" (UID: \"e6ffbe35-d1b5-48b0-8481-d788b0801196\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450536 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/104d79b7-c0c6-4cde-a7c2-d60a06a38647-config\") pod \"kube-apiserver-operator-766d6c64bb-htfcv\" (UID: \"104d79b7-c0c6-4cde-a7c2-d60a06a38647\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450665 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0c277ffe-d148-4407-9d6e-bb81b17724ac-metrics-tls\") pod \"dns-operator-744455d44c-89kfv\" (UID: \"0c277ffe-d148-4407-9d6e-bb81b17724ac\") " pod="openshift-dns-operator/dns-operator-744455d44c-89kfv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450691 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/470ff764-d3e6-48a6-aa1b-b4777a1d746f-serving-cert\") pod \"controller-manager-879f6c89f-zw89c\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450713 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdhv4\" (UniqueName: \"kubernetes.io/projected/10cac1f8-9f3e-43a3-9061-ff12b7cecbb2-kube-api-access-zdhv4\") pod \"service-ca-operator-777779d784-m6tz2\" (UID: \"10cac1f8-9f3e-43a3-9061-ff12b7cecbb2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450755 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/ab3e57d5-0315-4083-b0d8-80af81ba8ea0-default-certificate\") pod \"router-default-5444994796-9fj82\" (UID: \"ab3e57d5-0315-4083-b0d8-80af81ba8ea0\") " pod="openshift-ingress/router-default-5444994796-9fj82" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 
20:05:17.451234 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2c79a98-a142-4c13-a989-b2c887f03d46-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-s7j86\" (UID: \"a2c79a98-a142-4c13-a989-b2c887f03d46\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.451889 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/458d18eb-2c58-43d6-889b-b1ce6f367050-apiservice-cert\") pod \"packageserver-d55dfcdfc-jtc2j\" (UID: \"458d18eb-2c58-43d6-889b-b1ce6f367050\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.452119 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b1286791-4cc8-4da3-8450-5bf2e4dc577d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-fc5bx\" (UID: \"b1286791-4cc8-4da3-8450-5bf2e4dc577d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.452219 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-vbqcn\" (UID: \"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187\") " pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.452522 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ce69b998-69b8-46f8-b72b-83aa741479da-socket-dir\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.453285 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c2f53dac-143e-4ab4-b76d-cc7bf64f8df4-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-shv5g\" (UID: \"c2f53dac-143e-4ab4-b76d-cc7bf64f8df4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.453319 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/458d18eb-2c58-43d6-889b-b1ce6f367050-tmpfs\") pod \"packageserver-d55dfcdfc-jtc2j\" (UID: \"458d18eb-2c58-43d6-889b-b1ce6f367050\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.453411 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10cac1f8-9f3e-43a3-9061-ff12b7cecbb2-config\") pod \"service-ca-operator-777779d784-m6tz2\" (UID: \"10cac1f8-9f3e-43a3-9061-ff12b7cecbb2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.454004 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/ab3e57d5-0315-4083-b0d8-80af81ba8ea0-metrics-certs\") pod \"router-default-5444994796-9fj82\" (UID: \"ab3e57d5-0315-4083-b0d8-80af81ba8ea0\") " pod="openshift-ingress/router-default-5444994796-9fj82" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.454420 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/30a645a0-5c78-4576-ba66-56e95fb6b07a-certs\") pod \"machine-config-server-6kf4l\" (UID: \"30a645a0-5c78-4576-ba66-56e95fb6b07a\") " pod="openshift-machine-config-operator/machine-config-server-6kf4l" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.454465 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c54305b3-f1c3-44b0-a77a-47f4cd78794c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-c22sx\" (UID: \"c54305b3-f1c3-44b0-a77a-47f4cd78794c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.454478 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-vbqcn\" (UID: \"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187\") " pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.450598 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ce69b998-69b8-46f8-b72b-83aa741479da-registration-dir\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.454528 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c54305b3-f1c3-44b0-a77a-47f4cd78794c-service-ca-bundle\") pod \"authentication-operator-69f744f599-c22sx\" (UID: \"c54305b3-f1c3-44b0-a77a-47f4cd78794c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.455171 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6ffbe35-d1b5-48b0-8481-d788b0801196-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7hp8w\" (UID: \"e6ffbe35-d1b5-48b0-8481-d788b0801196\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.455172 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/30a645a0-5c78-4576-ba66-56e95fb6b07a-node-bootstrap-token\") pod \"machine-config-server-6kf4l\" (UID: \"30a645a0-5c78-4576-ba66-56e95fb6b07a\") " pod="openshift-machine-config-operator/machine-config-server-6kf4l" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.455276 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/104d79b7-c0c6-4cde-a7c2-d60a06a38647-config\") pod \"kube-apiserver-operator-766d6c64bb-htfcv\" (UID: \"104d79b7-c0c6-4cde-a7c2-d60a06a38647\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv" Jan 27 
20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.455379 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/02367c13-c0f2-4600-9baa-1a55f0f50e8b-srv-cert\") pod \"olm-operator-6b444d44fb-vlz9q\" (UID: \"02367c13-c0f2-4600-9baa-1a55f0f50e8b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.455401 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/11588ab1-689a-4227-a887-a57b945807a2-config-volume\") pod \"collect-profiles-29492400-v99t4\" (UID: \"11588ab1-689a-4227-a887-a57b945807a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.455700 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e68f3762-2025-4800-98ff-ca440c176b45-signing-key\") pod \"service-ca-9c57cc56f-gzpt9\" (UID: \"e68f3762-2025-4800-98ff-ca440c176b45\") " pod="openshift-service-ca/service-ca-9c57cc56f-gzpt9" Jan 27 20:05:17 crc kubenswrapper[4793]: E0127 20:05:17.456173 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:17.956160171 +0000 UTC m=+143.346413327 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.456583 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ab3e57d5-0315-4083-b0d8-80af81ba8ea0-service-ca-bundle\") pod \"router-default-5444994796-9fj82\" (UID: \"ab3e57d5-0315-4083-b0d8-80af81ba8ea0\") " pod="openshift-ingress/router-default-5444994796-9fj82" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.456670 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ce69b998-69b8-46f8-b72b-83aa741479da-plugins-dir\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.457168 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df0b32f8-987a-415d-a22e-c396f3bbaa8f-config-volume\") pod \"dns-default-sn9b2\" (UID: \"df0b32f8-987a-415d-a22e-c396f3bbaa8f\") " pod="openshift-dns/dns-default-sn9b2" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.458004 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9612bf32-849b-40f1-bf8a-cada1f25acf5-profile-collector-cert\") pod \"catalog-operator-68c6474976-d7pdv\" (UID: \"9612bf32-849b-40f1-bf8a-cada1f25acf5\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.458137 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e68f3762-2025-4800-98ff-ca440c176b45-signing-cabundle\") pod \"service-ca-9c57cc56f-gzpt9\" (UID: \"e68f3762-2025-4800-98ff-ca440c176b45\") " pod="openshift-service-ca/service-ca-9c57cc56f-gzpt9" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.455405 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ce69b998-69b8-46f8-b72b-83aa741479da-mountpoint-dir\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.458281 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d14aca5a-f217-4b87-bbcd-431b01ec7511-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-bnv2v\" (UID: \"d14aca5a-f217-4b87-bbcd-431b01ec7511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.458474 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-client-ca\") pod \"controller-manager-879f6c89f-zw89c\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.458680 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0c277ffe-d148-4407-9d6e-bb81b17724ac-metrics-tls\") pod \"dns-operator-744455d44c-89kfv\" (UID: \"0c277ffe-d148-4407-9d6e-bb81b17724ac\") " pod="openshift-dns-operator/dns-operator-744455d44c-89kfv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.458824 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/02367c13-c0f2-4600-9baa-1a55f0f50e8b-profile-collector-cert\") pod \"olm-operator-6b444d44fb-vlz9q\" (UID: \"02367c13-c0f2-4600-9baa-1a55f0f50e8b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.459349 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zw89c\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.459775 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9612bf32-849b-40f1-bf8a-cada1f25acf5-srv-cert\") pod \"catalog-operator-68c6474976-d7pdv\" (UID: \"9612bf32-849b-40f1-bf8a-cada1f25acf5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.460081 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-config\") pod \"controller-manager-879f6c89f-zw89c\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.460517 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/c2f53dac-143e-4ab4-b76d-cc7bf64f8df4-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-shv5g\" (UID: \"c2f53dac-143e-4ab4-b76d-cc7bf64f8df4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.460890 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/eac40cfc-8509-4b68-9962-4c2e602d155f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-gkcks\" (UID: \"eac40cfc-8509-4b68-9962-4c2e602d155f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gkcks" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.462383 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d14aca5a-f217-4b87-bbcd-431b01ec7511-proxy-tls\") pod \"machine-config-controller-84d6567774-bnv2v\" (UID: \"d14aca5a-f217-4b87-bbcd-431b01ec7511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.462458 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10cac1f8-9f3e-43a3-9061-ff12b7cecbb2-serving-cert\") pod \"service-ca-operator-777779d784-m6tz2\" (UID: \"10cac1f8-9f3e-43a3-9061-ff12b7cecbb2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.462676 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/458d18eb-2c58-43d6-889b-b1ce6f367050-webhook-cert\") pod \"packageserver-d55dfcdfc-jtc2j\" (UID: \"458d18eb-2c58-43d6-889b-b1ce6f367050\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.462808 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/470ff764-d3e6-48a6-aa1b-b4777a1d746f-serving-cert\") pod \"controller-manager-879f6c89f-zw89c\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.462886 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/104d79b7-c0c6-4cde-a7c2-d60a06a38647-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-htfcv\" (UID: \"104d79b7-c0c6-4cde-a7c2-d60a06a38647\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.462929 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/ab3e57d5-0315-4083-b0d8-80af81ba8ea0-stats-auth\") pod \"router-default-5444994796-9fj82\" (UID: \"ab3e57d5-0315-4083-b0d8-80af81ba8ea0\") " pod="openshift-ingress/router-default-5444994796-9fj82" 
Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.462924 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzzjk\" (UniqueName: \"kubernetes.io/projected/bf2f529a-ec6c-417d-bee8-7520d9b3d41c-kube-api-access-nzzjk\") pod \"machine-config-operator-74547568cd-nqccb\" (UID: \"bf2f529a-ec6c-417d-bee8-7520d9b3d41c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.463250 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/df0b32f8-987a-415d-a22e-c396f3bbaa8f-metrics-tls\") pod \"dns-default-sn9b2\" (UID: \"df0b32f8-987a-415d-a22e-c396f3bbaa8f\") " pod="openshift-dns/dns-default-sn9b2" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.463442 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6ffbe35-d1b5-48b0-8481-d788b0801196-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7hp8w\" (UID: \"e6ffbe35-d1b5-48b0-8481-d788b0801196\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.463586 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/ba1e1437-9755-498f-b07f-40997bdfa64c-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4lgxd\" (UID: \"ba1e1437-9755-498f-b07f-40997bdfa64c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4lgxd" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.463807 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a2c79a98-a142-4c13-a989-b2c887f03d46-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-s7j86\" (UID: \"a2c79a98-a142-4c13-a989-b2c887f03d46\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.463875 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/ab3e57d5-0315-4083-b0d8-80af81ba8ea0-default-certificate\") pod \"router-default-5444994796-9fj82\" (UID: \"ab3e57d5-0315-4083-b0d8-80af81ba8ea0\") " pod="openshift-ingress/router-default-5444994796-9fj82" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.463891 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c54305b3-f1c3-44b0-a77a-47f4cd78794c-serving-cert\") pod \"authentication-operator-69f744f599-c22sx\" (UID: \"c54305b3-f1c3-44b0-a77a-47f4cd78794c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.464439 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/11588ab1-689a-4227-a887-a57b945807a2-secret-volume\") pod \"collect-profiles-29492400-v99t4\" (UID: \"11588ab1-689a-4227-a887-a57b945807a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.465909 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"cert\" (UniqueName: \"kubernetes.io/secret/87824188-046e-4f32-b8da-016605488fca-cert\") pod \"ingress-canary-nzg26\" (UID: \"87824188-046e-4f32-b8da-016605488fca\") " pod="openshift-ingress-canary/ingress-canary-nzg26" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.477116 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrptt\" (UniqueName: \"kubernetes.io/projected/aa065492-723b-40dd-9259-1a4452804068-kube-api-access-lrptt\") pod \"route-controller-manager-6576b87f9c-48bz5\" (UID: \"aa065492-723b-40dd-9259-1a4452804068\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.497521 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.502036 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ea550d07-6ac0-477d-a50d-dba8b5a528a1-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-v64tq\" (UID: \"ea550d07-6ac0-477d-a50d-dba8b5a528a1\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.510774 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.521134 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-swb5p" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.537698 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3-bound-sa-token\") pod \"ingress-operator-5b745b69d9-ljlt5\" (UID: \"25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.554451 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:17 crc kubenswrapper[4793]: E0127 20:05:17.554820 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:18.054787092 +0000 UTC m=+143.445040288 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.555021 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: E0127 20:05:17.556621 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:18.056613674 +0000 UTC m=+143.446866830 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.558893 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pkzz\" (UniqueName: \"kubernetes.io/projected/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-kube-api-access-4pkzz\") pod \"console-f9d7485db-slbcq\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.566430 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.579411 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzct8\" (UniqueName: \"kubernetes.io/projected/6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed-kube-api-access-pzct8\") pod \"apiserver-7bbb656c7d-w4j4j\" (UID: \"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.599696 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjhn8\" (UniqueName: \"kubernetes.io/projected/dbcb231b-60c1-4027-b85d-bbdb1c193304-kube-api-access-gjhn8\") pod \"openshift-controller-manager-operator-756b6f6bc6-m6whz\" (UID: \"dbcb231b-60c1-4027-b85d-bbdb1c193304\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.620004 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-bound-sa-token\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.634164 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.651779 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kns7n\" (UniqueName: \"kubernetes.io/projected/30eb86e9-4989-4a95-bd07-07ff6a872298-kube-api-access-kns7n\") pod \"openshift-config-operator-7777fb866f-2zxw9\" (UID: \"30eb86e9-4989-4a95-bd07-07ff6a872298\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.655252 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-jtmmw"] Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.655658 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.656876 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:17 crc kubenswrapper[4793]: E0127 20:05:17.657407 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:18.157391051 +0000 UTC m=+143.547644207 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.660346 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wcgms\" (UniqueName: \"kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-kube-api-access-wcgms\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.665938 4793 generic.go:334] "Generic (PLEG): container finished" podID="af46e162-c595-4a44-98e5-a30e531aa9ed" containerID="930f961779390c5fe86a7c292f30c1ab64ed0456e9d5e3896721210df079e3cd" exitCode=0 Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.665987 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" event={"ID":"af46e162-c595-4a44-98e5-a30e531aa9ed","Type":"ContainerDied","Data":"930f961779390c5fe86a7c292f30c1ab64ed0456e9d5e3896721210df079e3cd"} Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.679022 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.687464 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9xtb\" (UniqueName: \"kubernetes.io/projected/f8b4a330-77aa-4fe5-af50-14f41e5f727e-kube-api-access-l9xtb\") pod \"openshift-apiserver-operator-796bbdcf4f-rpssm\" (UID: \"f8b4a330-77aa-4fe5-af50-14f41e5f727e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.701315 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrx42\" (UniqueName: \"kubernetes.io/projected/c26f1fef-b150-4021-8cfc-c08128248f8a-kube-api-access-mrx42\") pod \"migrator-59844c95c7-2qgvl\" (UID: \"c26f1fef-b150-4021-8cfc-c08128248f8a\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2qgvl" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.712343 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.716149 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2qgvl" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.725748 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wth5c\" (UniqueName: \"kubernetes.io/projected/8bb08394-518f-4f9c-811d-9bcbf765aad0-kube-api-access-wth5c\") pod \"etcd-operator-b45778765-n24xk\" (UID: \"8bb08394-518f-4f9c-811d-9bcbf765aad0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.729430 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.741212 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jzk2\" (UniqueName: \"kubernetes.io/projected/1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae-kube-api-access-6jzk2\") pod \"console-operator-58897d9998-4nvlb\" (UID: \"1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae\") " pod="openshift-console-operator/console-operator-58897d9998-4nvlb" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.743248 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.759295 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: E0127 20:05:17.759710 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:18.259694696 +0000 UTC m=+143.649947852 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.771942 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9c2l\" (UniqueName: \"kubernetes.io/projected/a043f31e-8e0a-41eb-a2ad-73f6d5795b0a-kube-api-access-p9c2l\") pod \"downloads-7954f5f757-mq8nr\" (UID: \"a043f31e-8e0a-41eb-a2ad-73f6d5795b0a\") " pod="openshift-console/downloads-7954f5f757-mq8nr" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.831255 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-mq8nr" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.834611 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lsw8l\" (UniqueName: \"kubernetes.io/projected/30a645a0-5c78-4576-ba66-56e95fb6b07a-kube-api-access-lsw8l\") pod \"machine-config-server-6kf4l\" (UID: \"30a645a0-5c78-4576-ba66-56e95fb6b07a\") " pod="openshift-machine-config-operator/machine-config-server-6kf4l" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.853214 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzfd2\" (UniqueName: \"kubernetes.io/projected/c54305b3-f1c3-44b0-a77a-47f4cd78794c-kube-api-access-rzfd2\") pod \"authentication-operator-69f744f599-c22sx\" (UID: \"c54305b3-f1c3-44b0-a77a-47f4cd78794c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.854028 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-787z9\" (UniqueName: \"kubernetes.io/projected/458d18eb-2c58-43d6-889b-b1ce6f367050-kube-api-access-787z9\") pod \"packageserver-d55dfcdfc-jtc2j\" (UID: \"458d18eb-2c58-43d6-889b-b1ce6f367050\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.860424 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:17 crc kubenswrapper[4793]: E0127 20:05:17.860604 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:18.360581746 +0000 UTC m=+143.750834902 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.860875 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slr7v\" (UniqueName: \"kubernetes.io/projected/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-kube-api-access-slr7v\") pod \"marketplace-operator-79b997595-vbqcn\" (UID: \"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187\") " pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.860937 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:17 crc kubenswrapper[4793]: E0127 20:05:17.861556 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:18.361535053 +0000 UTC m=+143.751788289 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.864029 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25vqs\" (UniqueName: \"kubernetes.io/projected/df0b32f8-987a-415d-a22e-c396f3bbaa8f-kube-api-access-25vqs\") pod \"dns-default-sn9b2\" (UID: \"df0b32f8-987a-415d-a22e-c396f3bbaa8f\") " pod="openshift-dns/dns-default-sn9b2" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.864671 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.864826 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-swb5p"] Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.880095 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4ntr\" (UniqueName: \"kubernetes.io/projected/b1286791-4cc8-4da3-8450-5bf2e4dc577d-kube-api-access-d4ntr\") pod \"package-server-manager-789f6589d5-fc5bx\" (UID: \"b1286791-4cc8-4da3-8450-5bf2e4dc577d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.886157 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.886247 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.891955 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.899449 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hb9c4\" (UniqueName: \"kubernetes.io/projected/02367c13-c0f2-4600-9baa-1a55f0f50e8b-kube-api-access-hb9c4\") pod \"olm-operator-6b444d44fb-vlz9q\" (UID: \"02367c13-c0f2-4600-9baa-1a55f0f50e8b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.930216 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-sn9b2" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.930710 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tr7jb\" (UniqueName: \"kubernetes.io/projected/ce69b998-69b8-46f8-b72b-83aa741479da-kube-api-access-tr7jb\") pod \"csi-hostpathplugin-95vph\" (UID: \"ce69b998-69b8-46f8-b72b-83aa741479da\") " pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.938826 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-6kf4l" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.949872 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/104d79b7-c0c6-4cde-a7c2-d60a06a38647-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-htfcv\" (UID: \"104d79b7-c0c6-4cde-a7c2-d60a06a38647\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.964218 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:17 crc kubenswrapper[4793]: E0127 20:05:17.964843 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:18.464826695 +0000 UTC m=+143.855079841 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.964940 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-95vph" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.966769 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6m4b8\" (UniqueName: \"kubernetes.io/projected/a2c79a98-a142-4c13-a989-b2c887f03d46-kube-api-access-6m4b8\") pod \"kube-storage-version-migrator-operator-b67b599dd-s7j86\" (UID: \"a2c79a98-a142-4c13-a989-b2c887f03d46\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.977408 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e6ffbe35-d1b5-48b0-8481-d788b0801196-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7hp8w\" (UID: \"e6ffbe35-d1b5-48b0-8481-d788b0801196\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.985767 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" Jan 27 20:05:17 crc kubenswrapper[4793]: I0127 20:05:17.998063 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-4nvlb" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.000418 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6pbf\" (UniqueName: \"kubernetes.io/projected/ab3e57d5-0315-4083-b0d8-80af81ba8ea0-kube-api-access-q6pbf\") pod \"router-default-5444994796-9fj82\" (UID: \"ab3e57d5-0315-4083-b0d8-80af81ba8ea0\") " pod="openshift-ingress/router-default-5444994796-9fj82" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.034178 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptzm5\" (UniqueName: \"kubernetes.io/projected/c2f53dac-143e-4ab4-b76d-cc7bf64f8df4-kube-api-access-ptzm5\") pod \"cluster-image-registry-operator-dc59b4c8b-shv5g\" (UID: \"c2f53dac-143e-4ab4-b76d-cc7bf64f8df4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.039465 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdhv4\" (UniqueName: \"kubernetes.io/projected/10cac1f8-9f3e-43a3-9061-ff12b7cecbb2-kube-api-access-zdhv4\") pod \"service-ca-operator-777779d784-m6tz2\" (UID: \"10cac1f8-9f3e-43a3-9061-ff12b7cecbb2\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.059166 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-299sb\" (UniqueName: \"kubernetes.io/projected/9612bf32-849b-40f1-bf8a-cada1f25acf5-kube-api-access-299sb\") pod \"catalog-operator-68c6474976-d7pdv\" (UID: \"9612bf32-849b-40f1-bf8a-cada1f25acf5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.097843 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-9fj82" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.098324 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.099140 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:18 crc kubenswrapper[4793]: E0127 20:05:18.099413 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:18.599402473 +0000 UTC m=+143.989655619 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.100226 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.126957 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7nmx\" (UniqueName: \"kubernetes.io/projected/ba1e1437-9755-498f-b07f-40997bdfa64c-kube-api-access-x7nmx\") pod \"control-plane-machine-set-operator-78cbb6b69f-4lgxd\" (UID: \"ba1e1437-9755-498f-b07f-40997bdfa64c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4lgxd" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.129999 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.130334 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c2f53dac-143e-4ab4-b76d-cc7bf64f8df4-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-shv5g\" (UID: \"c2f53dac-143e-4ab4-b76d-cc7bf64f8df4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.130890 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgz5l\" (UniqueName: \"kubernetes.io/projected/d14aca5a-f217-4b87-bbcd-431b01ec7511-kube-api-access-sgz5l\") pod \"machine-config-controller-84d6567774-bnv2v\" (UID: \"d14aca5a-f217-4b87-bbcd-431b01ec7511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.135888 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.155631 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwtkp\" (UniqueName: \"kubernetes.io/projected/e68f3762-2025-4800-98ff-ca440c176b45-kube-api-access-qwtkp\") pod \"service-ca-9c57cc56f-gzpt9\" (UID: \"e68f3762-2025-4800-98ff-ca440c176b45\") " pod="openshift-service-ca/service-ca-9c57cc56f-gzpt9" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.155912 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.174009 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.174665 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shsmh\" (UniqueName: \"kubernetes.io/projected/eac40cfc-8509-4b68-9962-4c2e602d155f-kube-api-access-shsmh\") pod \"multus-admission-controller-857f4d67dd-gkcks\" (UID: \"eac40cfc-8509-4b68-9962-4c2e602d155f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gkcks" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.183088 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86xcs\" (UniqueName: \"kubernetes.io/projected/0c277ffe-d148-4407-9d6e-bb81b17724ac-kube-api-access-86xcs\") pod \"dns-operator-744455d44c-89kfv\" (UID: \"0c277ffe-d148-4407-9d6e-bb81b17724ac\") " pod="openshift-dns-operator/dns-operator-744455d44c-89kfv" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.186667 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j"] Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.193481 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-gzpt9" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.201195 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.202172 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" Jan 27 20:05:18 crc kubenswrapper[4793]: E0127 20:05:18.202364 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:18.702340198 +0000 UTC m=+144.092593404 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.202736 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-sghkk"] Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.205753 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb"] Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.217186 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgkjl\" (UniqueName: \"kubernetes.io/projected/11588ab1-689a-4227-a887-a57b945807a2-kube-api-access-kgkjl\") pod \"collect-profiles-29492400-v99t4\" (UID: \"11588ab1-689a-4227-a887-a57b945807a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.222588 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.229264 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rk2wp\" (UniqueName: \"kubernetes.io/projected/87824188-046e-4f32-b8da-016605488fca-kube-api-access-rk2wp\") pod \"ingress-canary-nzg26\" (UID: \"87824188-046e-4f32-b8da-016605488fca\") " pod="openshift-ingress-canary/ingress-canary-nzg26" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.229944 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5"] Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.245466 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kc2rq\" (UniqueName: \"kubernetes.io/projected/470ff764-d3e6-48a6-aa1b-b4777a1d746f-kube-api-access-kc2rq\") pod \"controller-manager-879f6c89f-zw89c\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.246212 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-nzg26" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.279252 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq"] Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.286137 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-slbcq"] Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.303603 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:18 crc kubenswrapper[4793]: E0127 20:05:18.304620 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:18.804591092 +0000 UTC m=+144.194844248 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.338293 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5"] Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.351165 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4lgxd" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.373988 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.384100 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-89kfv" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.403114 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.405575 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:18 crc kubenswrapper[4793]: E0127 20:05:18.406461 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-27 20:05:18.906432369 +0000 UTC m=+144.296685525 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.419301 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.445619 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-gkcks" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.507173 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:18 crc kubenswrapper[4793]: E0127 20:05:18.507858 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:19.007845317 +0000 UTC m=+144.398098473 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.511455 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.527199 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-2qgvl"] Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.608062 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:18 crc kubenswrapper[4793]: E0127 20:05:18.608688 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:19.108668606 +0000 UTC m=+144.498921762 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:18 crc kubenswrapper[4793]: W0127 20:05:18.627377 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc26f1fef_b150_4021_8cfc_c08128248f8a.slice/crio-e787a51de4f0960aa6f308daf2f313098100d41a5cf043f3b20aac7b650682bf WatchSource:0}: Error finding container e787a51de4f0960aa6f308daf2f313098100d41a5cf043f3b20aac7b650682bf: Status 404 returned error can't find the container with id e787a51de4f0960aa6f308daf2f313098100d41a5cf043f3b20aac7b650682bf Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.645869 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz"] Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.647678 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm"] Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.655436 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-mq8nr"] Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.752793 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:18 crc kubenswrapper[4793]: E0127 20:05:18.753234 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:19.2532187 +0000 UTC m=+144.643471856 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.792324 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" event={"ID":"aa065492-723b-40dd-9259-1a4452804068","Type":"ContainerStarted","Data":"f6c6682eafebfffd5f8bab70a7ee5d339586a794da80b16aa91bd439d3655510"} Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.795049 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" event={"ID":"bf2f529a-ec6c-417d-bee8-7520d9b3d41c","Type":"ContainerStarted","Data":"66694d7eb75641754fe93065027a53e3f2b239a42f87fe56fa8d5023a2ddc05c"} Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.796257 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" event={"ID":"41a53f60-6551-47cf-a063-02a42f9983e9","Type":"ContainerStarted","Data":"1e311a6c539e7f56e08fd9bd59cbd97141fd741f6c521abe69a84ca2bfb157f6"} Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.796279 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" event={"ID":"41a53f60-6551-47cf-a063-02a42f9983e9","Type":"ContainerStarted","Data":"794a5bc3933c6a849d156be56c4fed003d2e841cd031d138ed4b868feb7b3960"} Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.803701 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.807613 4793 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-jtmmw container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.15:6443/healthz\": dial tcp 10.217.0.15:6443: connect: connection refused" start-of-body= Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.807658 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" podUID="41a53f60-6551-47cf-a063-02a42f9983e9" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.15:6443/healthz\": dial tcp 10.217.0.15:6443: connect: connection refused" Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.826891 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" event={"ID":"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed","Type":"ContainerStarted","Data":"7e86a10c56eb54aae493b512793fc61fe016dd3f10b2767688a608f1b483888b"} Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.855496 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:18 crc kubenswrapper[4793]: E0127 20:05:18.855695 4793 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:19.355662688 +0000 UTC m=+144.745915854 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.856019 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:18 crc kubenswrapper[4793]: E0127 20:05:18.856321 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:19.356311379 +0000 UTC m=+144.746564535 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.859479 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" event={"ID":"7be50072-4d5b-4ef3-a534-bdce40d627cb","Type":"ContainerStarted","Data":"290b1c603bca832ef2c7bc403ee1a695ad5937204512ebb22f00ef5e55e1bda0"} Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.875319 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" event={"ID":"25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3","Type":"ContainerStarted","Data":"2d99e996f54c76b3ed8fad05e42658eddc78df00fc2d7ce89c6634f1c2d8c549"} Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.876121 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-6kf4l" event={"ID":"30a645a0-5c78-4576-ba66-56e95fb6b07a","Type":"ContainerStarted","Data":"a4c08cadb70895ec9098804b893055bf50542188b51f3e55b166c54fa00422a7"} Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.877840 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-9fj82" event={"ID":"ab3e57d5-0315-4083-b0d8-80af81ba8ea0","Type":"ContainerStarted","Data":"0a3d65d875be8c6a7cdf287d9c03a58e47c01897f0d9518f7c3e8b62877fd94b"} Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.882870 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9"] Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.883605 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22" event={"ID":"8087e440-2261-4895-a480-c638b7615f67","Type":"ContainerStarted","Data":"c33b4c71ad1bd74c7604d15ad610557c33827d5d5940389cbb8b333e48fbb8f4"} Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.883678 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22" event={"ID":"8087e440-2261-4895-a480-c638b7615f67","Type":"ContainerStarted","Data":"4aa08cfd9b6e12277ad3da6e8e7a23d736e3996507dcfa4ab01466f82be7ac76"} Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.884247 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-slbcq" event={"ID":"f8ceccd4-2d2a-45c9-a255-2d6763b7d150","Type":"ContainerStarted","Data":"54d0992a59b891c18b927532464cecf178526ab9fcb3498dbc4e90008c8c1d64"} Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.887476 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2qgvl" event={"ID":"c26f1fef-b150-4021-8cfc-c08128248f8a","Type":"ContainerStarted","Data":"e787a51de4f0960aa6f308daf2f313098100d41a5cf043f3b20aac7b650682bf"} Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.891235 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-sn9b2"] Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.894502 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq" event={"ID":"ea550d07-6ac0-477d-a50d-dba8b5a528a1","Type":"ContainerStarted","Data":"1530cf08bf984d24158b4a01857313f57b6ad62379019e44014fb7f95fe89ce9"} Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.895960 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-swb5p" event={"ID":"74c3798b-d26e-4be2-817f-a9004fd819c1","Type":"ContainerStarted","Data":"dde87666f67f887c41b2296d36cf49f4cea8eb5322e780c6714992ba2f6aedaa"} Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.898797 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" event={"ID":"af46e162-c595-4a44-98e5-a30e531aa9ed","Type":"ContainerStarted","Data":"36e62bcfc64a0f7bda653651f499efa1f6bcbd671897f55a58c146ec00e0faf5"} Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.918426 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-4nvlb"] Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.918486 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx"] Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.958173 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:18 crc kubenswrapper[4793]: E0127 20:05:18.958472 4793 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:19.45843179 +0000 UTC m=+144.848684946 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:18 crc kubenswrapper[4793]: I0127 20:05:18.963866 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:18 crc kubenswrapper[4793]: E0127 20:05:18.964507 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:19.464491828 +0000 UTC m=+144.854744984 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.066048 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:19 crc kubenswrapper[4793]: E0127 20:05:19.066573 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:19.566530767 +0000 UTC m=+144.956783923 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.170534 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:19 crc kubenswrapper[4793]: E0127 20:05:19.171145 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:19.671127913 +0000 UTC m=+145.061381069 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:19 crc kubenswrapper[4793]: W0127 20:05:19.181472 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddbcb231b_60c1_4027_b85d_bbdb1c193304.slice/crio-b57a00d1538050dbcf0a1062e0c9ed59cdc3e2300db4212a3f7c90c5243a6a6f WatchSource:0}: Error finding container b57a00d1538050dbcf0a1062e0c9ed59cdc3e2300db4212a3f7c90c5243a6a6f: Status 404 returned error can't find the container with id b57a00d1538050dbcf0a1062e0c9ed59cdc3e2300db4212a3f7c90c5243a6a6f Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.310640 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:19 crc kubenswrapper[4793]: E0127 20:05:19.310951 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:19.810926443 +0000 UTC m=+145.201179609 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.412323 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:19 crc kubenswrapper[4793]: E0127 20:05:19.412765 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:19.91274156 +0000 UTC m=+145.302994716 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.521253 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:19 crc kubenswrapper[4793]: E0127 20:05:19.521451 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:20.021425867 +0000 UTC m=+145.411679013 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.521659 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:19 crc kubenswrapper[4793]: E0127 20:05:19.522319 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:20.022308253 +0000 UTC m=+145.412561409 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.623100 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:19 crc kubenswrapper[4793]: E0127 20:05:19.623359 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:20.123300175 +0000 UTC m=+145.513553331 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.623724 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:19 crc kubenswrapper[4793]: E0127 20:05:19.624240 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:20.124227971 +0000 UTC m=+145.514481127 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.796211 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:19 crc kubenswrapper[4793]: E0127 20:05:19.796708 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:20.296688341 +0000 UTC m=+145.686941497 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.898657 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:19 crc kubenswrapper[4793]: E0127 20:05:19.899354 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:20.399338281 +0000 UTC m=+145.789591447 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.927706 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-sn9b2" event={"ID":"df0b32f8-987a-415d-a22e-c396f3bbaa8f","Type":"ContainerStarted","Data":"da2ce383ef052e0b43b3e028756c73ceb2fe8c251aa61e5ba2c5623efb4a8bbb"} Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.928680 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz" event={"ID":"dbcb231b-60c1-4027-b85d-bbdb1c193304","Type":"ContainerStarted","Data":"b57a00d1538050dbcf0a1062e0c9ed59cdc3e2300db4212a3f7c90c5243a6a6f"} Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.929803 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-swb5p" event={"ID":"74c3798b-d26e-4be2-817f-a9004fd819c1","Type":"ContainerStarted","Data":"769db7ca59527d61dbd5f1d9e7e9f63f7596fe16f5b20dee1a712a0de0622af1"} Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.930499 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" event={"ID":"30eb86e9-4989-4a95-bd07-07ff6a872298","Type":"ContainerStarted","Data":"ba8852473cf1d841b17e84a9d3ada7f56fd10c1d37f63368aab91f098079d5e7"} Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.941477 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx" event={"ID":"b1286791-4cc8-4da3-8450-5bf2e4dc577d","Type":"ContainerStarted","Data":"d73a80ada209ba22607497eac33fabd61bb4a6f2860a2f53ec741c482e23e945"} Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.942382 4793 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-console/downloads-7954f5f757-mq8nr" event={"ID":"a043f31e-8e0a-41eb-a2ad-73f6d5795b0a","Type":"ContainerStarted","Data":"6f6cf4b05d14c9dcf9e8b4f38a28589a56007f1903b5e086524a77519d4c2b13"} Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.943829 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-4nvlb" event={"ID":"1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae","Type":"ContainerStarted","Data":"2d19464c4d6b5bda3ef7de293cec9639134271eec7f1785336ea8cb7cd225463"} Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.944752 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm" event={"ID":"f8b4a330-77aa-4fe5-af50-14f41e5f727e","Type":"ContainerStarted","Data":"951f4f8817c10f1dc69beec3bba079fcf717fdd22c64f06db82e78dd85c7caee"} Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.945669 4793 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-jtmmw container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.15:6443/healthz\": dial tcp 10.217.0.15:6443: connect: connection refused" start-of-body= Jan 27 20:05:19 crc kubenswrapper[4793]: I0127 20:05:19.945712 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" podUID="41a53f60-6551-47cf-a063-02a42f9983e9" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.15:6443/healthz\": dial tcp 10.217.0.15:6443: connect: connection refused" Jan 27 20:05:20 crc kubenswrapper[4793]: I0127 20:05:19.999474 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:20 crc kubenswrapper[4793]: E0127 20:05:19.999624 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:20.49959423 +0000 UTC m=+145.889847396 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:20 crc kubenswrapper[4793]: I0127 20:05:19.999947 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:20 crc kubenswrapper[4793]: E0127 20:05:20.000314 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:20.500302112 +0000 UTC m=+145.890555288 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:20 crc kubenswrapper[4793]: I0127 20:05:20.048758 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" podStartSLOduration=124.048735452 podStartE2EDuration="2m4.048735452s" podCreationTimestamp="2026-01-27 20:03:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:19.970696557 +0000 UTC m=+145.360949713" watchObservedRunningTime="2026-01-27 20:05:20.048735452 +0000 UTC m=+145.438988598" Jan 27 20:05:20 crc kubenswrapper[4793]: I0127 20:05:20.053500 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vbqcn"] Jan 27 20:05:20 crc kubenswrapper[4793]: I0127 20:05:20.055746 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-n24xk"] Jan 27 20:05:20 crc kubenswrapper[4793]: I0127 20:05:20.057668 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-95vph"] Jan 27 20:05:20 crc kubenswrapper[4793]: I0127 20:05:20.100508 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:20 crc kubenswrapper[4793]: E0127 20:05:20.100696 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-27 20:05:20.600662753 +0000 UTC m=+145.990915899 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:20 crc kubenswrapper[4793]: I0127 20:05:20.100937 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:20 crc kubenswrapper[4793]: E0127 20:05:20.102004 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:20.601985786 +0000 UTC m=+145.992239042 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:20 crc kubenswrapper[4793]: W0127 20:05:20.147598 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce69b998_69b8_46f8_b72b_83aa741479da.slice/crio-a7dc17d4da8f34675c59263980e56fa6687452153cf8187c02a0b0aa2ccab8d0 WatchSource:0}: Error finding container a7dc17d4da8f34675c59263980e56fa6687452153cf8187c02a0b0aa2ccab8d0: Status 404 returned error can't find the container with id a7dc17d4da8f34675c59263980e56fa6687452153cf8187c02a0b0aa2ccab8d0 Jan 27 20:05:20 crc kubenswrapper[4793]: I0127 20:05:20.201720 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:20 crc kubenswrapper[4793]: E0127 20:05:20.202088 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:20.702072871 +0000 UTC m=+146.092326027 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:20 crc kubenswrapper[4793]: I0127 20:05:20.321296 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:20 crc kubenswrapper[4793]: E0127 20:05:20.440172 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:20.940153934 +0000 UTC m=+146.330407090 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:20 crc kubenswrapper[4793]: I0127 20:05:20.499315 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j"] Jan 27 20:05:20 crc kubenswrapper[4793]: I0127 20:05:20.539508 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:20 crc kubenswrapper[4793]: E0127 20:05:20.540056 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:21.040032547 +0000 UTC m=+146.430285703 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:20 crc kubenswrapper[4793]: I0127 20:05:20.649967 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:20 crc kubenswrapper[4793]: E0127 20:05:20.650502 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:21.150490926 +0000 UTC m=+146.540744082 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:20 crc kubenswrapper[4793]: W0127 20:05:20.661107 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod458d18eb_2c58_43d6_889b_b1ce6f367050.slice/crio-0ecf03d8b654204d2589154a02310264f7ba9bdf79bba35b3cb66e2448c0d745 WatchSource:0}: Error finding container 0ecf03d8b654204d2589154a02310264f7ba9bdf79bba35b3cb66e2448c0d745: Status 404 returned error can't find the container with id 0ecf03d8b654204d2589154a02310264f7ba9bdf79bba35b3cb66e2448c0d745 Jan 27 20:05:20 crc kubenswrapper[4793]: I0127 20:05:20.750918 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:20 crc kubenswrapper[4793]: E0127 20:05:20.751421 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:21.251398346 +0000 UTC m=+146.641651502 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:20.852519 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:21 crc kubenswrapper[4793]: E0127 20:05:20.853389 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:21.353372865 +0000 UTC m=+146.743626021 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:20.954621 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:21 crc kubenswrapper[4793]: E0127 20:05:20.954957 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:21.454942906 +0000 UTC m=+146.845196062 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.022114 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" event={"ID":"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187","Type":"ContainerStarted","Data":"6862ce06f7c37bce5e9c8bfbfb54d3d930d9768d3d8b74ef24b2d97eef1fcf4d"} Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.025130 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" event={"ID":"aa065492-723b-40dd-9259-1a4452804068","Type":"ContainerStarted","Data":"dc3dde5dcd9427093a4dc20ba3845e9ba056034aa9ca5426676af71b1680e65e"} Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.026096 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.029172 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" event={"ID":"8bb08394-518f-4f9c-811d-9bcbf765aad0","Type":"ContainerStarted","Data":"752a9653569a6b537075f07f584b9ea0fdc823c82f42d70f3882a8ccda63ff60"} Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.032357 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-95vph" event={"ID":"ce69b998-69b8-46f8-b72b-83aa741479da","Type":"ContainerStarted","Data":"a7dc17d4da8f34675c59263980e56fa6687452153cf8187c02a0b0aa2ccab8d0"} Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.037047 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" event={"ID":"458d18eb-2c58-43d6-889b-b1ce6f367050","Type":"ContainerStarted","Data":"0ecf03d8b654204d2589154a02310264f7ba9bdf79bba35b3cb66e2448c0d745"} Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.039478 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22" event={"ID":"8087e440-2261-4895-a480-c638b7615f67","Type":"ContainerStarted","Data":"0d786d2f5b7ae7dee6e532a0abf46c62002c254e131a1dfa3e5c14627b8b1a05"} Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.049194 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" event={"ID":"bf2f529a-ec6c-417d-bee8-7520d9b3d41c","Type":"ContainerStarted","Data":"f62f7a2c14ec8ab6c8495b961bfd9fe90aecf99fbad1f11396b17a11f15745e2"} Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.055759 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv"] Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.055886 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q"] Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.056477 4793 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:21 crc kubenswrapper[4793]: E0127 20:05:21.056899 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:21.556884875 +0000 UTC m=+146.947138031 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.059767 4793 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-48bz5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.059812 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" podUID="aa065492-723b-40dd-9259-1a4452804068" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.065590 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4lgxd"] Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.066754 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" podStartSLOduration=124.0667329 podStartE2EDuration="2m4.0667329s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:21.047439147 +0000 UTC m=+146.437692303" watchObservedRunningTime="2026-01-27 20:05:21.0667329 +0000 UTC m=+146.456986056" Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.078886 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w"] Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.078937 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-c22sx"] Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.084354 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-l2x22" podStartSLOduration=126.084337572 podStartE2EDuration="2m6.084337572s" podCreationTimestamp="2026-01-27 20:03:15 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:21.072758976 +0000 UTC m=+146.463012132" watchObservedRunningTime="2026-01-27 20:05:21.084337572 +0000 UTC m=+146.474590728" Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.086445 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86"] Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.086659 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" event={"ID":"7be50072-4d5b-4ef3-a534-bdce40d627cb","Type":"ContainerStarted","Data":"91080fc1f07736d3871e6f70a453a10d033c5632b1a3703acfcfd926f0557533"} Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.092077 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-gzpt9"] Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.100506 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv"] Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.102197 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2"] Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.105789 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v"] Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.108519 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-nzg26"] Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.110658 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-89kfv"] Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.157764 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:21 crc kubenswrapper[4793]: E0127 20:05:21.158104 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:21.65808451 +0000 UTC m=+147.048337666 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.158728 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:21 crc kubenswrapper[4793]: E0127 20:05:21.159520 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:21.659506425 +0000 UTC m=+147.049759571 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.161842 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4"] Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.167509 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-gkcks"] Jan 27 20:05:21 crc kubenswrapper[4793]: W0127 20:05:21.182087 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod02367c13_c0f2_4600_9baa_1a55f0f50e8b.slice/crio-d8b213f7e31fd4710cca3e10efa990bc703e26617d5c140c5ac41e1aa416fff7 WatchSource:0}: Error finding container d8b213f7e31fd4710cca3e10efa990bc703e26617d5c140c5ac41e1aa416fff7: Status 404 returned error can't find the container with id d8b213f7e31fd4710cca3e10efa990bc703e26617d5c140c5ac41e1aa416fff7 Jan 27 20:05:21 crc kubenswrapper[4793]: W0127 20:05:21.203022 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd14aca5a_f217_4b87_bbcd_431b01ec7511.slice/crio-133c609c5d5452c038644011158042c09b79903b8a27177e69474183e1833cbb WatchSource:0}: Error finding container 133c609c5d5452c038644011158042c09b79903b8a27177e69474183e1833cbb: Status 404 returned error can't find the container with id 133c609c5d5452c038644011158042c09b79903b8a27177e69474183e1833cbb Jan 27 20:05:21 crc kubenswrapper[4793]: W0127 20:05:21.214155 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod87824188_046e_4f32_b8da_016605488fca.slice/crio-1c9c1942f10ae596430e03443417fce75fab74ae83d93a35426ad27465e5821f WatchSource:0}: Error finding container 
1c9c1942f10ae596430e03443417fce75fab74ae83d93a35426ad27465e5821f: Status 404 returned error can't find the container with id 1c9c1942f10ae596430e03443417fce75fab74ae83d93a35426ad27465e5821f Jan 27 20:05:21 crc kubenswrapper[4793]: W0127 20:05:21.243684 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod11588ab1_689a_4227_a887_a57b945807a2.slice/crio-4cb04f6a3511701cad63b00ae4954b496178562941482f0f037c87d85d19d4e2 WatchSource:0}: Error finding container 4cb04f6a3511701cad63b00ae4954b496178562941482f0f037c87d85d19d4e2: Status 404 returned error can't find the container with id 4cb04f6a3511701cad63b00ae4954b496178562941482f0f037c87d85d19d4e2 Jan 27 20:05:21 crc kubenswrapper[4793]: W0127 20:05:21.244086 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeac40cfc_8509_4b68_9962_4c2e602d155f.slice/crio-bba2545875779322aa88d29961e60ac00c5d0683855557ff3291d2b29e906039 WatchSource:0}: Error finding container bba2545875779322aa88d29961e60ac00c5d0683855557ff3291d2b29e906039: Status 404 returned error can't find the container with id bba2545875779322aa88d29961e60ac00c5d0683855557ff3291d2b29e906039 Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.252948 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zw89c"] Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.254573 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g"] Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.259999 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:21 crc kubenswrapper[4793]: E0127 20:05:21.260508 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:21.760490146 +0000 UTC m=+147.150743312 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.374439 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:21 crc kubenswrapper[4793]: E0127 20:05:21.374896 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:21.874879466 +0000 UTC m=+147.265132622 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.476151 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:21 crc kubenswrapper[4793]: E0127 20:05:21.476296 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:21.976265654 +0000 UTC m=+147.366518810 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.476831 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:21 crc kubenswrapper[4793]: E0127 20:05:21.477320 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:21.977310182 +0000 UTC m=+147.367563338 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.590214 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:21 crc kubenswrapper[4793]: E0127 20:05:21.590389 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:22.090360168 +0000 UTC m=+147.480613324 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.590592 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:21 crc kubenswrapper[4793]: E0127 20:05:21.590912 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:22.090899067 +0000 UTC m=+147.481152223 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.692213 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:21 crc kubenswrapper[4793]: E0127 20:05:21.692460 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:22.192408178 +0000 UTC m=+147.582661334 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.692535 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:21 crc kubenswrapper[4793]: E0127 20:05:21.692879 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:22.192862086 +0000 UTC m=+147.583115242 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:21 crc kubenswrapper[4793]: I0127 20:05:21.867664 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:21 crc kubenswrapper[4793]: E0127 20:05:21.868307 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:22.368281588 +0000 UTC m=+147.758534744 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.051023 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:22 crc kubenswrapper[4793]: E0127 20:05:22.051400 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:22.551380916 +0000 UTC m=+147.941634162 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.143988 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" event={"ID":"470ff764-d3e6-48a6-aa1b-b4777a1d746f","Type":"ContainerStarted","Data":"babb3039a55f0b4b8cefac07cc1e41129acd54134ec857fa7e818373cc2d3290"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.144278 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-nzg26" event={"ID":"87824188-046e-4f32-b8da-016605488fca","Type":"ContainerStarted","Data":"1c9c1942f10ae596430e03443417fce75fab74ae83d93a35426ad27465e5821f"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.144291 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v" event={"ID":"d14aca5a-f217-4b87-bbcd-431b01ec7511","Type":"ContainerStarted","Data":"133c609c5d5452c038644011158042c09b79903b8a27177e69474183e1833cbb"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.144301 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-swb5p" event={"ID":"74c3798b-d26e-4be2-817f-a9004fd819c1","Type":"ContainerStarted","Data":"c92541311a08cf89817a88c3aee447ac0368721135b59bebb7a595952ea0f195"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.144397 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-89kfv" event={"ID":"0c277ffe-d148-4407-9d6e-bb81b17724ac","Type":"ContainerStarted","Data":"1ce7aee576e0a488e81d48eeb24f41d57900408e14722710521f38eadff2fe62"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.153289 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:22 crc kubenswrapper[4793]: E0127 20:05:22.154308 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:22.654285912 +0000 UTC m=+148.044539078 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.173832 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-swb5p" podStartSLOduration=125.173804818 podStartE2EDuration="2m5.173804818s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:22.166690512 +0000 UTC m=+147.556943678" watchObservedRunningTime="2026-01-27 20:05:22.173804818 +0000 UTC m=+147.564057974" Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.200305 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" event={"ID":"02367c13-c0f2-4600-9baa-1a55f0f50e8b","Type":"ContainerStarted","Data":"d8b213f7e31fd4710cca3e10efa990bc703e26617d5c140c5ac41e1aa416fff7"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.205140 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w" event={"ID":"e6ffbe35-d1b5-48b0-8481-d788b0801196","Type":"ContainerStarted","Data":"a6fec9656195b355a986af16926b42957adb2f61e4a4af0cd8dcde544096bc2f"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.206066 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-mq8nr" event={"ID":"a043f31e-8e0a-41eb-a2ad-73f6d5795b0a","Type":"ContainerStarted","Data":"216e537348723b7afd6c1593c5a6c4937e4ca4f4d4ddd41015ae1f48872be21c"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.206708 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-mq8nr" Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.212289 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.212345 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get 
\"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.212446 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" event={"ID":"9612bf32-849b-40f1-bf8a-cada1f25acf5","Type":"ContainerStarted","Data":"2457eb42b2c9a5a0953f92b30bb0985af351ce4bb8c7461a4f0fc5fdafc16e88"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.216576 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-9fj82" event={"ID":"ab3e57d5-0315-4083-b0d8-80af81ba8ea0","Type":"ContainerStarted","Data":"ce5635d4ccaccd45ba2bf58d99d4300d35798f4968c682471615f2ce5619e9b3"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.220677 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-6kf4l" event={"ID":"30a645a0-5c78-4576-ba66-56e95fb6b07a","Type":"ContainerStarted","Data":"b390171c463390fc834ca453ae45f910314b8b629f432011f50740dc67945913"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.222823 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-gkcks" event={"ID":"eac40cfc-8509-4b68-9962-4c2e602d155f","Type":"ContainerStarted","Data":"bba2545875779322aa88d29961e60ac00c5d0683855557ff3291d2b29e906039"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.229990 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" event={"ID":"af46e162-c595-4a44-98e5-a30e531aa9ed","Type":"ContainerStarted","Data":"8b30a6b8cdc304179f9a56c92e881ac489f182e17a685700daa0a0b34226a1cd"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.244671 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-gzpt9" event={"ID":"e68f3762-2025-4800-98ff-ca440c176b45","Type":"ContainerStarted","Data":"c3ca443a7c7c73dd904f88b5d87446562c3200a59ef693d5492918bb6af03fc8"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.246994 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx" event={"ID":"b1286791-4cc8-4da3-8450-5bf2e4dc577d","Type":"ContainerStarted","Data":"ff6445c466d44cb2829c5b28fb2c98a890db4fa40c76c543b9c11c16cbfd11a7"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.262532 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:22 crc kubenswrapper[4793]: E0127 20:05:22.262911 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:22.762900288 +0000 UTC m=+148.153153444 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.288830 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4lgxd" event={"ID":"ba1e1437-9755-498f-b07f-40997bdfa64c","Type":"ContainerStarted","Data":"6d0d46597ea81e32bdd6ee6d5e6be9b4850cadae43fd5114af05b982eb938956"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.302366 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" event={"ID":"25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3","Type":"ContainerStarted","Data":"b66c43217a3e474ac06394687d4b1bd37df7ab9d820b039b490d08386ad6f96a"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.303821 4793 generic.go:334] "Generic (PLEG): container finished" podID="30eb86e9-4989-4a95-bd07-07ff6a872298" containerID="562ab1645b938e1d74ab0b99e5d67b7cba2702afcd5ff0a1c2189202f61417a4" exitCode=0 Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.303871 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" event={"ID":"30eb86e9-4989-4a95-bd07-07ff6a872298","Type":"ContainerDied","Data":"562ab1645b938e1d74ab0b99e5d67b7cba2702afcd5ff0a1c2189202f61417a4"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.309051 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz" event={"ID":"dbcb231b-60c1-4027-b85d-bbdb1c193304","Type":"ContainerStarted","Data":"74410cd74b452731a9b179e4e34b0d135fc272ae6d2f036df97bf4c89494f2ef"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.314613 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2" event={"ID":"10cac1f8-9f3e-43a3-9061-ff12b7cecbb2","Type":"ContainerStarted","Data":"1c69e03b1361811f6ea8643fa921bb852f62d50c058a22cb3571b8118d9c0af1"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.315522 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-slbcq" event={"ID":"f8ceccd4-2d2a-45c9-a255-2d6763b7d150","Type":"ContainerStarted","Data":"f6442722fc422334bd9d2a0dcd14715cb9b6ade79f92ad1c488cebc3ec9d0cb8"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.324662 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86" event={"ID":"a2c79a98-a142-4c13-a989-b2c887f03d46","Type":"ContainerStarted","Data":"255771f015efc5b42883680c082fd5599374937c024f8569b88c01190e78ca33"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.328891 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" event={"ID":"11588ab1-689a-4227-a887-a57b945807a2","Type":"ContainerStarted","Data":"4cb04f6a3511701cad63b00ae4954b496178562941482f0f037c87d85d19d4e2"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 
20:05:22.329772 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv" event={"ID":"104d79b7-c0c6-4cde-a7c2-d60a06a38647","Type":"ContainerStarted","Data":"20a6dc7a6c7ad4ce7e6f5a3e555a85e5d32509c0cfb7e87fe12aa4423424aeb9"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.330882 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq" event={"ID":"ea550d07-6ac0-477d-a50d-dba8b5a528a1","Type":"ContainerStarted","Data":"ff4143c1de04d92e01e03fb5b27c2eada5b0e7c49c113339b3e6c2068b2b2363"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.332377 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2qgvl" event={"ID":"c26f1fef-b150-4021-8cfc-c08128248f8a","Type":"ContainerStarted","Data":"7d8df38f378c6d2ac08c50a7e6624f78d3218cf1ac7b008d9bb87151f0101344"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.333157 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g" event={"ID":"c2f53dac-143e-4ab4-b76d-cc7bf64f8df4","Type":"ContainerStarted","Data":"693e010ba6ecf4dac156f76194e815997914f4b344c99785174b21c8e840a0ca"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.334363 4793 generic.go:334] "Generic (PLEG): container finished" podID="6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed" containerID="9f4239ea79f61add8eb94b0e7d859479d2dc7685f10e45b4da1bd8d7ad012c49" exitCode=0 Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.334433 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" event={"ID":"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed","Type":"ContainerDied","Data":"9f4239ea79f61add8eb94b0e7d859479d2dc7685f10e45b4da1bd8d7ad012c49"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.336254 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" event={"ID":"c54305b3-f1c3-44b0-a77a-47f4cd78794c","Type":"ContainerStarted","Data":"e0ee1a4b22a5e388f1ed922e814e968a164937a4dd82c1ffb89429cdf23b318a"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.337397 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-4nvlb" event={"ID":"1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae","Type":"ContainerStarted","Data":"b4d0437e6e72e0f2d6951291af33c644bee03d049bbe5d38febfbb521052877a"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.338138 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-4nvlb" Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.339736 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm" event={"ID":"f8b4a330-77aa-4fe5-af50-14f41e5f727e","Type":"ContainerStarted","Data":"c53fb27ace1e0b40b14371eb22a2eb7b2a0d0214c1c9abef95bf3ef5a72952a0"} Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.340264 4793 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-48bz5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" 
start-of-body= Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.340294 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" podUID="aa065492-723b-40dd-9259-1a4452804068" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.363924 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:22 crc kubenswrapper[4793]: E0127 20:05:22.366051 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:22.866029478 +0000 UTC m=+148.256282634 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.374841 4793 patch_prober.go:28] interesting pod/console-operator-58897d9998-4nvlb container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/readyz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.375060 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-4nvlb" podUID="1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/readyz\": dial tcp 10.217.0.13:8443: connect: connection refused" Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.417170 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-6kf4l" podStartSLOduration=7.417154514 podStartE2EDuration="7.417154514s" podCreationTimestamp="2026-01-27 20:05:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:22.415846262 +0000 UTC m=+147.806099438" watchObservedRunningTime="2026-01-27 20:05:22.417154514 +0000 UTC m=+147.807407670" Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.418204 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-mq8nr" podStartSLOduration=125.418197493 podStartE2EDuration="2m5.418197493s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:22.244675395 +0000 UTC m=+147.634928551" watchObservedRunningTime="2026-01-27 
Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.454140 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" podStartSLOduration=126.45412344 podStartE2EDuration="2m6.45412344s" podCreationTimestamp="2026-01-27 20:03:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:22.451779098 +0000 UTC m=+147.842032264" watchObservedRunningTime="2026-01-27 20:05:22.45412344 +0000 UTC m=+147.844376596"
Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.468717 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:22 crc kubenswrapper[4793]: E0127 20:05:22.472978 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:22.972965694 +0000 UTC m=+148.363218850 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.554341 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-9fj82" podStartSLOduration=125.554324827 podStartE2EDuration="2m5.554324827s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:22.552050418 +0000 UTC m=+147.942303584" watchObservedRunningTime="2026-01-27 20:05:22.554324827 +0000 UTC m=+147.944577983"
Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.570155 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:22 crc kubenswrapper[4793]: E0127 20:05:22.570532 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:23.070513845 +0000 UTC m=+148.460767011 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.671523 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:22 crc kubenswrapper[4793]: E0127 20:05:22.671892 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:23.171874622 +0000 UTC m=+148.562127778 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.793686 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:22 crc kubenswrapper[4793]: E0127 20:05:22.793999 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:23.293980789 +0000 UTC m=+148.684233945 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.818536 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.818663 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 20:05:22 crc kubenswrapper[4793]: I0127 20:05:22.855273 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rpssm" podStartSLOduration=126.855252225 podStartE2EDuration="2m6.855252225s" podCreationTimestamp="2026-01-27 20:03:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:22.853657077 +0000 UTC m=+148.243910233" watchObservedRunningTime="2026-01-27 20:05:22.855252225 +0000 UTC m=+148.245505381"
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:22.950205 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:23 crc kubenswrapper[4793]: E0127 20:05:22.950660 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:23.450646657 +0000 UTC m=+148.840899813 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.053266 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:23 crc kubenswrapper[4793]: E0127 20:05:23.053742 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:23.553726206 +0000 UTC m=+148.943979362 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.099238 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-slbcq" podStartSLOduration=126.099217994 podStartE2EDuration="2m6.099217994s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:23.094344917 +0000 UTC m=+148.484598073" watchObservedRunningTime="2026-01-27 20:05:23.099217994 +0000 UTC m=+148.489471150"
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.100370 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-9fj82"
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.101601 4793 patch_prober.go:28] interesting pod/router-default-5444994796-9fj82 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.101641 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9fj82" podUID="ab3e57d5-0315-4083-b0d8-80af81ba8ea0" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.157051 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:23 crc kubenswrapper[4793]: E0127 20:05:23.157559 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:23.657516337 +0000 UTC m=+149.047769553 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.163370 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-m6whz" podStartSLOduration=126.163350531 podStartE2EDuration="2m6.163350531s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:23.128058825 +0000 UTC m=+148.518311981" watchObservedRunningTime="2026-01-27 20:05:23.163350531 +0000 UTC m=+148.553603687" Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.164369 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-4nvlb" podStartSLOduration=126.164361459 podStartE2EDuration="2m6.164361459s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:23.162932334 +0000 UTC m=+148.553185490" watchObservedRunningTime="2026-01-27 20:05:23.164361459 +0000 UTC m=+148.554614615" Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.234231 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-v64tq" podStartSLOduration=126.234212948 podStartE2EDuration="2m6.234212948s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:23.231925717 +0000 UTC m=+148.622178873" watchObservedRunningTime="2026-01-27 20:05:23.234212948 +0000 UTC m=+148.624466104" Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.274317 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:23 crc kubenswrapper[4793]: E0127 20:05:23.274510 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:23.774438902 +0000 UTC m=+149.164692058 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.274643 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:23 crc kubenswrapper[4793]: E0127 20:05:23.275050 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:23.775038512 +0000 UTC m=+149.165291668 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.370785 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" event={"ID":"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187","Type":"ContainerStarted","Data":"4471df472af763cae8ddf7904d3cba15132e1f1e747ad9bf69f21eb469057097"} Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.372125 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.380097 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:23 crc kubenswrapper[4793]: E0127 20:05:23.380508 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:23.880490023 +0000 UTC m=+149.270743179 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.396502 4793 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-vbqcn container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.396632 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" podUID="1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.411973 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" podStartSLOduration=126.411956221 podStartE2EDuration="2m6.411956221s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:23.410945253 +0000 UTC m=+148.801198439" watchObservedRunningTime="2026-01-27 20:05:23.411956221 +0000 UTC m=+148.802209377" Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.417424 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" event={"ID":"9612bf32-849b-40f1-bf8a-cada1f25acf5","Type":"ContainerStarted","Data":"25f1f3dc7cb78882cd12ce31c2411afcc9cd9f3bfb10d7b2c3ef70dd8cb5bceb"} Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.419632 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-sn9b2" event={"ID":"df0b32f8-987a-415d-a22e-c396f3bbaa8f","Type":"ContainerStarted","Data":"f506f9aecacdc4419eb4864d97478ea912605d3a097ba7bda44a411b2aab57b3"} Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.422614 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" event={"ID":"8bb08394-518f-4f9c-811d-9bcbf765aad0","Type":"ContainerStarted","Data":"ab4ee5a918bbbdb6e1d2c9f7c9a03214d54ca273a8c1d4e06701008916a90141"} Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.425003 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-95vph" event={"ID":"ce69b998-69b8-46f8-b72b-83aa741479da","Type":"ContainerStarted","Data":"762a21a8ee2a9347cae2ac37469fc0bb46ee0b78f8fa6e15e0d0e0ae39f6f979"} Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.430061 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" event={"ID":"458d18eb-2c58-43d6-889b-b1ce6f367050","Type":"ContainerStarted","Data":"111d4c9a628bdae6638e7f1882d2d8a980783caf09ecb201d21b4b7c37c24ba4"} Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.431015 4793 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.432480 4793 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jtc2j container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" start-of-body= Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.432613 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" podUID="458d18eb-2c58-43d6-889b-b1ce6f367050" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.434789 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" event={"ID":"02367c13-c0f2-4600-9baa-1a55f0f50e8b","Type":"ContainerStarted","Data":"3ec1f115899f9165d2f8d288d161115ed29f67ab4dd1792fb497dd5c2d153842"} Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.437684 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" event={"ID":"7be50072-4d5b-4ef3-a534-bdce40d627cb","Type":"ContainerStarted","Data":"1ec5e5b0cb1234bccca60d2fece9d6b8eb955456a06f4a02e94b8c981ede105c"} Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.459582 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" podStartSLOduration=126.459563236 podStartE2EDuration="2m6.459563236s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:23.458485256 +0000 UTC m=+148.848738432" watchObservedRunningTime="2026-01-27 20:05:23.459563236 +0000 UTC m=+148.849816392" Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.459694 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-n24xk" podStartSLOduration=126.459688828 podStartE2EDuration="2m6.459688828s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:23.43894788 +0000 UTC m=+148.829201036" watchObservedRunningTime="2026-01-27 20:05:23.459688828 +0000 UTC m=+148.849941984" Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.506974 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:23 crc kubenswrapper[4793]: E0127 20:05:23.508110 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.008094207 +0000 UTC m=+149.398347363 (durationBeforeRetry 500ms). 
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.515738 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" event={"ID":"bf2f529a-ec6c-417d-bee8-7520d9b3d41c","Type":"ContainerStarted","Data":"6682cafae3290de1688fbbc2f37b505547df37242dccf6420c6dc34f0d6254c2"}
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.522666 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4lgxd" event={"ID":"ba1e1437-9755-498f-b07f-40997bdfa64c","Type":"ContainerStarted","Data":"382f462e45533bc72d53ca46998b67c434d43401d716e53f7ff3ee9a4c153c84"}
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.526697 4793 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-48bz5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.526749 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" podUID="aa065492-723b-40dd-9259-1a4452804068" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused"
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.526765 4793 patch_prober.go:28] interesting pod/console-operator-58897d9998-4nvlb container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/readyz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body=
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.526785 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body=
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.526820 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-4nvlb" podUID="1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/readyz\": dial tcp 10.217.0.13:8443: connect: connection refused"
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.526849 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused"
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.539785 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nqccb" podStartSLOduration=126.539763438 podStartE2EDuration="2m6.539763438s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:23.536679264 +0000 UTC m=+148.926932420" watchObservedRunningTime="2026-01-27 20:05:23.539763438 +0000 UTC m=+148.930016614"
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.540463 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-sghkk" podStartSLOduration=126.540454291 podStartE2EDuration="2m6.540454291s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:23.513826389 +0000 UTC m=+148.904079545" watchObservedRunningTime="2026-01-27 20:05:23.540454291 +0000 UTC m=+148.930707447"
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.608265 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:23 crc kubenswrapper[4793]: E0127 20:05:23.608482 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.108450837 +0000 UTC m=+149.498703993 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.608959 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:23 crc kubenswrapper[4793]: E0127 20:05:23.610989 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.110981262 +0000 UTC m=+149.501234418 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.710399 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:23 crc kubenswrapper[4793]: E0127 20:05:23.710596 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.210572418 +0000 UTC m=+149.600825574 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.710763 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:23 crc kubenswrapper[4793]: E0127 20:05:23.711089 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.211078068 +0000 UTC m=+149.601331224 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.811849 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:23 crc kubenswrapper[4793]: E0127 20:05:23.812321 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.312303013 +0000 UTC m=+149.702556179 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:23 crc kubenswrapper[4793]: I0127 20:05:23.941397 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:23 crc kubenswrapper[4793]: E0127 20:05:23.941820 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.44180721 +0000 UTC m=+149.832060366 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.041949 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:24 crc kubenswrapper[4793]: E0127 20:05:24.042341 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.542323003 +0000 UTC m=+149.932576159 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.099473 4793 patch_prober.go:28] interesting pod/router-default-5444994796-9fj82 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.099513 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9fj82" podUID="ab3e57d5-0315-4083-b0d8-80af81ba8ea0" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.177839 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:24 crc kubenswrapper[4793]: E0127 20:05:24.178210 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.678196424 +0000 UTC m=+150.068449580 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.278853 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:24 crc kubenswrapper[4793]: E0127 20:05:24.279042 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.779009852 +0000 UTC m=+150.169263008 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.279441 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:24 crc kubenswrapper[4793]: E0127 20:05:24.279858 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.779843187 +0000 UTC m=+150.170096343 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.422203 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:24 crc kubenswrapper[4793]: E0127 20:05:24.422376 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.922351665 +0000 UTC m=+150.312604821 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.422860 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:24 crc kubenswrapper[4793]: E0127 20:05:24.423317 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:24.923297291 +0000 UTC m=+150.313550507 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.540707 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:24 crc kubenswrapper[4793]: E0127 20:05:24.540907 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:25.040880557 +0000 UTC m=+150.431133743 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.541068 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:24 crc kubenswrapper[4793]: E0127 20:05:24.541410 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:25.041400217 +0000 UTC m=+150.431653373 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.568835 4793 csr.go:261] certificate signing request csr-98wwk is approved, waiting to be issued
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.573975 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86" event={"ID":"a2c79a98-a142-4c13-a989-b2c887f03d46","Type":"ContainerStarted","Data":"c88ca7976e9b4b8884bc4882935672730040b8ce678e88399ae6ca2bb18cf064"}
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.576883 4793 csr.go:257] certificate signing request csr-98wwk is issued
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.582764 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w" event={"ID":"e6ffbe35-d1b5-48b0-8481-d788b0801196","Type":"ContainerStarted","Data":"e7691614c1270698b2097542cbad2f836323a83c339da585bd39979da1adf333"}
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.605318 4793 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-vbqcn container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body=
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.605356 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" podUID="1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.605407 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" event={"ID":"c54305b3-f1c3-44b0-a77a-47f4cd78794c","Type":"ContainerStarted","Data":"b50e6243f6747547e9ffff2f6640bcb444aaaa5505c6de24284116dc43247eb0"}
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.606257 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body=
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.606306 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.607012 4793 patch_prober.go:28] interesting pod/console-operator-58897d9998-4nvlb container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/readyz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body=
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.607060 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-4nvlb" podUID="1d3c0a26-7fdb-48b7-9d95-7693ed59d5ae" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/readyz\": dial tcp 10.217.0.13:8443: connect: connection refused"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.612102 4793 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jtc2j container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" start-of-body=
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.612129 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" podUID="458d18eb-2c58-43d6-889b-b1ce6f367050" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.617354 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-s7j86" podStartSLOduration=127.617344574 podStartE2EDuration="2m7.617344574s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:24.616106822 +0000 UTC m=+150.006359988" watchObservedRunningTime="2026-01-27 20:05:24.617344574 +0000 UTC m=+150.007597730"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.642047 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:24 crc kubenswrapper[4793]: E0127 20:05:24.643626 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:25.14360124 +0000 UTC m=+150.533854456 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.711361 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" podStartSLOduration=127.711339611 podStartE2EDuration="2m7.711339611s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:24.709524679 +0000 UTC m=+150.099777855" watchObservedRunningTime="2026-01-27 20:05:24.711339611 +0000 UTC m=+150.101592767"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.711989 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4lgxd" podStartSLOduration=127.711980542 podStartE2EDuration="2m7.711980542s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:24.675612747 +0000 UTC m=+150.065865913" watchObservedRunningTime="2026-01-27 20:05:24.711980542 +0000 UTC m=+150.102233708"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.736035 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-c22sx" podStartSLOduration=128.736018819 podStartE2EDuration="2m8.736018819s" podCreationTimestamp="2026-01-27 20:03:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:24.735681402 +0000 UTC m=+150.125934568" watchObservedRunningTime="2026-01-27 20:05:24.736018819 +0000 UTC m=+150.126271975"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.743893 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.743951 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.743980 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.744030 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.744085 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:05:24 crc kubenswrapper[4793]: E0127 20:05:24.744755 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:25.244739334 +0000 UTC m=+150.634992490 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.750642 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.773191 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7hp8w" podStartSLOduration=127.773167458 podStartE2EDuration="2m7.773167458s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:24.770716644 +0000 UTC m=+150.160969800" watchObservedRunningTime="2026-01-27 20:05:24.773167458 +0000 UTC m=+150.163420614"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.787879 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.788466 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.789073 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.796261 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv" podStartSLOduration=127.796243318 podStartE2EDuration="2m7.796243318s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:24.795184949 +0000 UTC m=+150.185438105" watchObservedRunningTime="2026-01-27 20:05:24.796243318 +0000 UTC m=+150.186496474"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.823995 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.834374 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.844993 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:24 crc kubenswrapper[4793]: E0127 20:05:24.845213 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:25.345187966 +0000 UTC m=+150.735441122 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.845340 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:24 crc kubenswrapper[4793]: E0127 20:05:24.845706 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed.
No retries permitted until 2026-01-27 20:05:25.345689544 +0000 UTC m=+150.735942700 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.870739 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" podStartSLOduration=127.870721879 podStartE2EDuration="2m7.870721879s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:24.869019319 +0000 UTC m=+150.259272485" watchObservedRunningTime="2026-01-27 20:05:24.870721879 +0000 UTC m=+150.260975045" Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.947131 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:24 crc kubenswrapper[4793]: E0127 20:05:24.953414 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:25.453376395 +0000 UTC m=+150.843629561 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:24 crc kubenswrapper[4793]: I0127 20:05:24.953538 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:24 crc kubenswrapper[4793]: E0127 20:05:24.953936 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:25.453926284 +0000 UTC m=+150.844179440 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.089695 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.089762 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:25 crc kubenswrapper[4793]: E0127 20:05:25.090052 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:25.590035248 +0000 UTC m=+150.980288414 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.180200 4793 patch_prober.go:28] interesting pod/router-default-5444994796-9fj82 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 20:05:25 crc kubenswrapper[4793]: [-]has-synced failed: reason withheld Jan 27 20:05:25 crc kubenswrapper[4793]: [+]process-running ok Jan 27 20:05:25 crc kubenswrapper[4793]: healthz check failed Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.180266 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9fj82" podUID="ab3e57d5-0315-4083-b0d8-80af81ba8ea0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.194935 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:25 crc kubenswrapper[4793]: E0127 20:05:25.195285 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:25.695272716 +0000 UTC m=+151.085525872 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.295809 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:25 crc kubenswrapper[4793]: E0127 20:05:25.295913 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:25.79588901 +0000 UTC m=+151.186142176 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.296384 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:25 crc kubenswrapper[4793]: E0127 20:05:25.296852 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:25.796842447 +0000 UTC m=+151.187095673 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.398951 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:25 crc kubenswrapper[4793]: E0127 20:05:25.399576 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:25.89955909 +0000 UTC m=+151.289812246 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.500880 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:25 crc kubenswrapper[4793]: E0127 20:05:25.501261 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:26.001246393 +0000 UTC m=+151.391499549 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.578021 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-27 20:00:24 +0000 UTC, rotation deadline is 2026-12-15 12:13:25.721394737 +0000 UTC Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.578102 4793 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 7720h8m0.143299651s for next certificate rotation Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.606661 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:25 crc kubenswrapper[4793]: E0127 20:05:25.608507 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:26.108487846 +0000 UTC m=+151.498741002 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.608611 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:25 crc kubenswrapper[4793]: E0127 20:05:25.609159 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:26.109147777 +0000 UTC m=+151.499400933 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.709998 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:25 crc kubenswrapper[4793]: E0127 20:05:25.710600 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:26.210585586 +0000 UTC m=+151.600838742 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.813111 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:25 crc kubenswrapper[4793]: E0127 20:05:25.813370 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:26.313359179 +0000 UTC m=+151.703612335 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.840136 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" event={"ID":"6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed","Type":"ContainerStarted","Data":"881569212b97cd52196fe26d6df6ba258b5a6fb854e5439087d1d8a136318c1a"} Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.841314 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-sn9b2" event={"ID":"df0b32f8-987a-415d-a22e-c396f3bbaa8f","Type":"ContainerStarted","Data":"42260df5e41a2f16d7cc73d5f00f00cb78d22544d6132388d5fe595b5b565309"} Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.841851 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-sn9b2" Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.843348 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2qgvl" event={"ID":"c26f1fef-b150-4021-8cfc-c08128248f8a","Type":"ContainerStarted","Data":"621b5c3d985796f5485b61b02d2bdf6cc2f623e21a9356b242941242745d70c3"} Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.844808 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-nzg26" event={"ID":"87824188-046e-4f32-b8da-016605488fca","Type":"ContainerStarted","Data":"f5dcdd086cb538b99f2e9fb4ca906585f4c86416b1ed6b90eed6a8c111ee4a81"} Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.845971 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g" event={"ID":"c2f53dac-143e-4ab4-b76d-cc7bf64f8df4","Type":"ContainerStarted","Data":"ed067ab5f7bde98eb1c34cdfece32870f4bb6caadb35746fd61702c5b604679b"} Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.848035 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" event={"ID":"11588ab1-689a-4227-a887-a57b945807a2","Type":"ContainerStarted","Data":"59a65e3215a81d98738e1626c562a1b04b0e491558cf2fe85839854ff5c573b7"} Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.849641 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" event={"ID":"25a5a5d9-dabe-4487-8bcc-0c2c9d67ddc3","Type":"ContainerStarted","Data":"5f496587c3a12a92ab889ce53a6a1a1d336a4959a9ebab953526db61225f9c7e"} Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.855837 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-gkcks" event={"ID":"eac40cfc-8509-4b68-9962-4c2e602d155f","Type":"ContainerStarted","Data":"e7809760bcdf15a70e9a3eea44acefc10abe5eb0dc9e7f583407ed5779fdfdfc"} Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.914180 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:25 crc kubenswrapper[4793]: E0127 20:05:25.914475 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:26.414462223 +0000 UTC m=+151.804715379 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.924496 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-htfcv" event={"ID":"104d79b7-c0c6-4cde-a7c2-d60a06a38647","Type":"ContainerStarted","Data":"75313c5e29fe00f0f92d2e410b6ec359653412a7701fec2d0734a5c5a1f44a12"} Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.927054 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx" event={"ID":"b1286791-4cc8-4da3-8450-5bf2e4dc577d","Type":"ContainerStarted","Data":"1fce55e03e48520fe0c1605fe2dbdbab49ff37637706e7a36a3f469f277de792"} Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.927383 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx" Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.928694 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-89kfv" event={"ID":"0c277ffe-d148-4407-9d6e-bb81b17724ac","Type":"ContainerStarted","Data":"4ca4b62264b79b77b94d0586ee739bb208908cc99865c57d252f2838f66b3849"} Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.929459 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2" event={"ID":"10cac1f8-9f3e-43a3-9061-ff12b7cecbb2","Type":"ContainerStarted","Data":"81451f6fdebb18526e88fbe6ec96d67a97e5d46f0682da370cd92c4faa5cfe88"} Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.930626 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" event={"ID":"470ff764-d3e6-48a6-aa1b-b4777a1d746f","Type":"ContainerStarted","Data":"7db64802125589ae67628bf36b2a4093c519ff5fce18774bf9f93e889f120849"} Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.931198 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.932895 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" event={"ID":"30eb86e9-4989-4a95-bd07-07ff6a872298","Type":"ContainerStarted","Data":"45cddb2f4d491ccecdb653c404c14d612e7548ee7460352d2e951a3c046e13ce"} Jan 27 20:05:25 crc 
kubenswrapper[4793]: I0127 20:05:25.933199 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.960261 4793 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-zw89c container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" start-of-body= Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.960318 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" podUID="470ff764-d3e6-48a6-aa1b-b4777a1d746f" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.968665 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v" event={"ID":"d14aca5a-f217-4b87-bbcd-431b01ec7511","Type":"ContainerStarted","Data":"8fe33c3e4ab8333a162bae89cf94084bb596db9841afbc0fccaab5d8e06f0674"} Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.968695 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v" event={"ID":"d14aca5a-f217-4b87-bbcd-431b01ec7511","Type":"ContainerStarted","Data":"a85b9c2b4a0b1cafe22a28d240f413749fa80eb0b442d1e606eed3217c416f37"} Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.984290 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-gzpt9" event={"ID":"e68f3762-2025-4800-98ff-ca440c176b45","Type":"ContainerStarted","Data":"a316e8a515fe601fa282b55ae6c573df483a95c03618ef1d2dffb01e7702c8bb"} Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.984444 4793 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jtc2j container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" start-of-body= Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.984476 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j" podUID="458d18eb-2c58-43d6-889b-b1ce6f367050" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.984798 4793 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-vbqcn container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 27 20:05:25 crc kubenswrapper[4793]: I0127 20:05:25.984838 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" podUID="1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.020967 4793 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:26 crc kubenswrapper[4793]: E0127 20:05:26.023151 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:26.523135571 +0000 UTC m=+151.913388727 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.111921 4793 patch_prober.go:28] interesting pod/router-default-5444994796-9fj82 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 20:05:26 crc kubenswrapper[4793]: [-]has-synced failed: reason withheld Jan 27 20:05:26 crc kubenswrapper[4793]: [+]process-running ok Jan 27 20:05:26 crc kubenswrapper[4793]: healthz check failed Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.112321 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9fj82" podUID="ab3e57d5-0315-4083-b0d8-80af81ba8ea0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.124248 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:26 crc kubenswrapper[4793]: E0127 20:05:26.126309 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:26.626278491 +0000 UTC m=+152.016531657 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:26 crc kubenswrapper[4793]: W0127 20:05:26.200575 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-606457f5359127fd6ed942836ad4bca68f23edbc07767ea84d17ef177f2f68ed WatchSource:0}: Error finding container 606457f5359127fd6ed942836ad4bca68f23edbc07767ea84d17ef177f2f68ed: Status 404 returned error can't find the container with id 606457f5359127fd6ed942836ad4bca68f23edbc07767ea84d17ef177f2f68ed Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.226384 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:26 crc kubenswrapper[4793]: E0127 20:05:26.226820 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:26.726804963 +0000 UTC m=+152.117058119 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.253301 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ljlt5" podStartSLOduration=129.253279633 podStartE2EDuration="2m9.253279633s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:26.252412638 +0000 UTC m=+151.642665794" watchObservedRunningTime="2026-01-27 20:05:26.253279633 +0000 UTC m=+151.643532789" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.283192 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.283224 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.291932 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-sn9b2" podStartSLOduration=11.291914378 podStartE2EDuration="11.291914378s" podCreationTimestamp="2026-01-27 20:05:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:26.29088493 +0000 UTC m=+151.681138086" watchObservedRunningTime="2026-01-27 20:05:26.291914378 +0000 UTC m=+151.682167534" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.297508 4793 patch_prober.go:28] interesting pod/apiserver-76f77b778f-v8r5p container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 27 20:05:26 crc kubenswrapper[4793]: [+]log ok Jan 27 20:05:26 crc kubenswrapper[4793]: [+]etcd ok Jan 27 20:05:26 crc kubenswrapper[4793]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 27 20:05:26 crc kubenswrapper[4793]: [+]poststarthook/generic-apiserver-start-informers ok Jan 27 20:05:26 crc kubenswrapper[4793]: [+]poststarthook/max-in-flight-filter ok Jan 27 20:05:26 crc kubenswrapper[4793]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 27 20:05:26 crc kubenswrapper[4793]: [+]poststarthook/image.openshift.io-apiserver-caches ok Jan 27 20:05:26 crc kubenswrapper[4793]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Jan 27 20:05:26 crc kubenswrapper[4793]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Jan 27 20:05:26 crc kubenswrapper[4793]: [+]poststarthook/project.openshift.io-projectcache ok Jan 27 20:05:26 crc kubenswrapper[4793]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Jan 27 20:05:26 crc kubenswrapper[4793]: [+]poststarthook/openshift.io-startinformers ok Jan 27 20:05:26 crc kubenswrapper[4793]: [+]poststarthook/openshift.io-restmapperupdater ok Jan 27 20:05:26 crc kubenswrapper[4793]: 
[+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 27 20:05:26 crc kubenswrapper[4793]: livez check failed Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.297595 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" podUID="af46e162-c595-4a44-98e5-a30e531aa9ed" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.328746 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:26 crc kubenswrapper[4793]: E0127 20:05:26.329215 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:26.829196629 +0000 UTC m=+152.219449785 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.339102 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" podStartSLOduration=129.339078495 podStartE2EDuration="2m9.339078495s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:26.328250473 +0000 UTC m=+151.718503629" watchObservedRunningTime="2026-01-27 20:05:26.339078495 +0000 UTC m=+151.729331651" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.394463 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2qgvl" podStartSLOduration=129.394445027 podStartE2EDuration="2m9.394445027s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:26.362903567 +0000 UTC m=+151.753156733" watchObservedRunningTime="2026-01-27 20:05:26.394445027 +0000 UTC m=+151.784698183" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.395525 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bnv2v" podStartSLOduration=129.395518216 podStartE2EDuration="2m9.395518216s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:26.393414318 +0000 UTC m=+151.783667474" watchObservedRunningTime="2026-01-27 20:05:26.395518216 +0000 UTC m=+151.785771372" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.418132 
4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx" podStartSLOduration=129.418114537 podStartE2EDuration="2m9.418114537s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:26.417322692 +0000 UTC m=+151.807575848" watchObservedRunningTime="2026-01-27 20:05:26.418114537 +0000 UTC m=+151.808367693" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.432052 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:26 crc kubenswrapper[4793]: E0127 20:05:26.434064 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:26.934051799 +0000 UTC m=+152.324304955 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.450164 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-gzpt9" podStartSLOduration=129.450144795 podStartE2EDuration="2m9.450144795s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:26.448867722 +0000 UTC m=+151.839120878" watchObservedRunningTime="2026-01-27 20:05:26.450144795 +0000 UTC m=+151.840397951" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.473417 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" podStartSLOduration=130.473399667 podStartE2EDuration="2m10.473399667s" podCreationTimestamp="2026-01-27 20:03:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:26.470679539 +0000 UTC m=+151.860932695" watchObservedRunningTime="2026-01-27 20:05:26.473399667 +0000 UTC m=+151.863652823" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.501809 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" podStartSLOduration=129.501791491 podStartE2EDuration="2m9.501791491s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:26.500766862 +0000 UTC m=+151.891020028" watchObservedRunningTime="2026-01-27 
20:05:26.501791491 +0000 UTC m=+151.892044647" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.521922 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-m6tz2" podStartSLOduration=129.521899197 podStartE2EDuration="2m9.521899197s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:26.518380815 +0000 UTC m=+151.908633981" watchObservedRunningTime="2026-01-27 20:05:26.521899197 +0000 UTC m=+151.912152363" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.586246 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:26 crc kubenswrapper[4793]: E0127 20:05:26.586604 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:27.086588985 +0000 UTC m=+152.476842141 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.689783 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:26 crc kubenswrapper[4793]: E0127 20:05:26.690178 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:27.190163712 +0000 UTC m=+152.580417008 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.855775 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:26 crc kubenswrapper[4793]: E0127 20:05:26.856085 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:27.356068275 +0000 UTC m=+152.746321431 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.936336 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-shv5g" podStartSLOduration=129.936318649 podStartE2EDuration="2m9.936318649s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:26.595202917 +0000 UTC m=+151.985456073" watchObservedRunningTime="2026-01-27 20:05:26.936318649 +0000 UTC m=+152.326571805" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.937340 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-nzg26" podStartSLOduration=11.937335767 podStartE2EDuration="11.937335767s" podCreationTimestamp="2026-01-27 20:05:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:26.928569722 +0000 UTC m=+152.318822878" watchObservedRunningTime="2026-01-27 20:05:26.937335767 +0000 UTC m=+152.327588923" Jan 27 20:05:26 crc kubenswrapper[4793]: I0127 20:05:26.969173 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:26 crc kubenswrapper[4793]: E0127 20:05:26.970070 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:27.470051287 +0000 UTC m=+152.860304453 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.050930 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-gkcks" event={"ID":"eac40cfc-8509-4b68-9962-4c2e602d155f","Type":"ContainerStarted","Data":"2ab2a3f3b8934c35a0abfac642b5f8de60392cc2a68fb0507abb1d35e74c71a3"} Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.080442 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:27 crc kubenswrapper[4793]: E0127 20:05:27.088214 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:27.588190773 +0000 UTC m=+152.978443929 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.187789 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:27 crc kubenswrapper[4793]: E0127 20:05:27.189021 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:27.689007429 +0000 UTC m=+153.079260585 (durationBeforeRetry 500ms). 
Jan 27 20:05:27 crc kubenswrapper[4793]: E0127 20:05:27.189021 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:27.689007429 +0000 UTC m=+153.079260585 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.189972 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"e0fa021986e759ef2dcf841b5d6dee185d9575805601c0a894f2aa0b0464b784"}
Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.190117 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"606457f5359127fd6ed942836ad4bca68f23edbc07767ea84d17ef177f2f68ed"}
Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.197434 4793 patch_prober.go:28] interesting pod/router-default-5444994796-9fj82 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 27 20:05:27 crc kubenswrapper[4793]: [-]has-synced failed: reason withheld
Jan 27 20:05:27 crc kubenswrapper[4793]: [+]process-running ok
Jan 27 20:05:27 crc kubenswrapper[4793]: healthz check failed
Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.197479 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"b1a746bfebafd518768f8f6f00ef53682cc81cbbbd394099ae63918aa18f8b19"}
Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.197496 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9fj82" podUID="ab3e57d5-0315-4083-b0d8-80af81ba8ea0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.197523 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"d0ceebb61f92ab54cf609f9d94f1e5ccb1568b92371c415647d5132602625d29"}
Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.216020 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-89kfv" event={"ID":"0c277ffe-d148-4407-9d6e-bb81b17724ac","Type":"ContainerStarted","Data":"43991ca4fb3085ba78acf386a317e2a236585a74b61c17ec634d402741382da3"}
Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.225231 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"e24ea7bc9a68e1f26ccd3437a2d35619a5c928e6f990165aceebb5fc863dca3b"}
Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.225277 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c"
event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"7e87569002099d7a1356d94ca22a197a3684ef0d6f7a56d1dc22a458deb8e139"} Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.225742 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.242680 4793 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-2zxw9 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.242762 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" podUID="30eb86e9-4989-4a95-bd07-07ff6a872298" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.245261 4793 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-zw89c container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" start-of-body= Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.245309 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" podUID="470ff764-d3e6-48a6-aa1b-b4777a1d746f" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.255163 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" podStartSLOduration=130.255146073 podStartE2EDuration="2m10.255146073s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:27.217203949 +0000 UTC m=+152.607457105" watchObservedRunningTime="2026-01-27 20:05:27.255146073 +0000 UTC m=+152.645399229" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.291095 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:27 crc kubenswrapper[4793]: E0127 20:05:27.292051 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:27.792035268 +0000 UTC m=+153.182288424 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.393671 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:27 crc kubenswrapper[4793]: E0127 20:05:27.398266 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:27.898251873 +0000 UTC m=+153.288505129 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.430030 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-gkcks" podStartSLOduration=130.430010246 podStartE2EDuration="2m10.430010246s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:27.301228632 +0000 UTC m=+152.691481808" watchObservedRunningTime="2026-01-27 20:05:27.430010246 +0000 UTC m=+152.820263402" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.518304 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:27 crc kubenswrapper[4793]: E0127 20:05:27.518450 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:28.018429335 +0000 UTC m=+153.408682491 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.518659 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:27 crc kubenswrapper[4793]: E0127 20:05:27.519122 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:28.019103966 +0000 UTC m=+153.409357122 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.528721 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.532228 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.542005 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-89kfv" podStartSLOduration=130.541983763 podStartE2EDuration="2m10.541983763s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:27.539824174 +0000 UTC m=+152.930077350" watchObservedRunningTime="2026-01-27 20:05:27.541983763 +0000 UTC m=+152.932236919" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.619684 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:27 crc kubenswrapper[4793]: E0127 20:05:27.619821 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:28.119802563 +0000 UTC m=+153.510055719 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.619975 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:27 crc kubenswrapper[4793]: E0127 20:05:27.620667 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:28.120640848 +0000 UTC m=+153.510894074 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.635482 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.635566 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.639654 4793 patch_prober.go:28] interesting pod/console-f9d7485db-slbcq container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.640036 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-slbcq" podUID="f8ceccd4-2d2a-45c9-a255-2d6763b7d150" containerName="console" probeResult="failure" output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.656774 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.656815 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.662838 4793 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4j4j container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.12:8443/livez\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 
20:05:27.662898 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" podUID="6fdb1ea3-a40f-4c62-981a-d3ef9e49e2ed" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.12:8443/livez\": dial tcp 10.217.0.12:8443: connect: connection refused" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.721345 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:27 crc kubenswrapper[4793]: E0127 20:05:27.721464 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:28.221446256 +0000 UTC m=+153.611699422 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.721569 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:27 crc kubenswrapper[4793]: E0127 20:05:27.722531 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:28.222512035 +0000 UTC m=+153.612765191 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.823074 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:27 crc kubenswrapper[4793]: E0127 20:05:27.823470 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-27 20:05:28.323453376 +0000 UTC m=+153.713706532 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.833924 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.833989 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.834006 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.834074 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.866325 4793 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-vbqcn container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.866419 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" podUID="1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.866632 4793 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-vbqcn container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.866680 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" podUID="1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" Jan 27 20:05:27 crc kubenswrapper[4793]: I0127 20:05:27.924420 4793 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:27 crc kubenswrapper[4793]: E0127 20:05:27.924857 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:28.424836214 +0000 UTC m=+153.815089370 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.166343 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.166879 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:28 crc kubenswrapper[4793]: E0127 20:05:28.167106 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:28.667079481 +0000 UTC m=+154.057332637 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.167669 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.168579 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-9fj82" Jan 27 20:05:28 crc kubenswrapper[4793]: E0127 20:05:28.168942 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-27 20:05:28.668925283 +0000 UTC m=+154.059178519 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.176431 4793 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-vlz9q container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.176472 4793 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-vlz9q container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.176510 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" podUID="02367c13-c0f2-4600-9baa-1a55f0f50e8b" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.176596 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" podUID="02367c13-c0f2-4600-9baa-1a55f0f50e8b" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.177042 4793 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-vlz9q container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.177071 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" podUID="02367c13-c0f2-4600-9baa-1a55f0f50e8b" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.182386 4793 patch_prober.go:28] interesting pod/router-default-5444994796-9fj82 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 20:05:28 crc kubenswrapper[4793]: [-]has-synced failed: reason withheld Jan 27 20:05:28 crc kubenswrapper[4793]: [+]process-running ok Jan 27 20:05:28 crc kubenswrapper[4793]: healthz check failed Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.182430 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9fj82" 
podUID="ab3e57d5-0315-4083-b0d8-80af81ba8ea0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.242803 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.244015 4793 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-d7pdv container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.244060 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" podUID="9612bf32-849b-40f1-bf8a-cada1f25acf5" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.244281 4793 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-d7pdv container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.244299 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" podUID="9612bf32-849b-40f1-bf8a-cada1f25acf5" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.244352 4793 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-d7pdv container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.244390 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" podUID="9612bf32-849b-40f1-bf8a-cada1f25acf5" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.268716 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:28 crc kubenswrapper[4793]: E0127 20:05:28.268919 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:28.768884697 +0000 UTC m=+154.159137853 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.269043 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:28 crc kubenswrapper[4793]: E0127 20:05:28.269375 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:28.769360885 +0000 UTC m=+154.159614041 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.301013 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-95vph" event={"ID":"ce69b998-69b8-46f8-b72b-83aa741479da","Type":"ContainerStarted","Data":"9017336107e6a1c64b0565f0592dacb5d29c08847ccf8827ce812db61d10df89"}
Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.301095 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-95vph" event={"ID":"ce69b998-69b8-46f8-b72b-83aa741479da","Type":"ContainerStarted","Data":"b5dfcc101244c93ac058a1491f815bfbd488e9094e9044f68501edec306c57b7"}
Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.301598 4793 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-zw89c container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" start-of-body=
Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.301626 4793 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-2zxw9 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.301659 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" podUID="30eb86e9-4989-4a95-bd07-07ff6a872298" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused"
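Interleaved with the retries, the PLEG events above show the driver's own pod, hostpath-provisioner/csi-hostpathplugin-95vph, starting its containers. The recurring error text ("driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers") refers to the kubelet's in-memory view of registered plugins: until the driver's registration socket is picked up, newCsiDriverClient has nothing to connect to and fails fast, which is why every attempt above fails within milliseconds instead of timing out. A deliberately simplified model of that lookup, with illustrative names and an illustrative socket path rather than kubelet's actual types:

package main

import (
	"fmt"
	"sync"
)

// csiDriverRegistry is a toy stand-in for the "list of registered CSI
// drivers" the errors above mention: a name-to-endpoint map guarded by a lock.
type csiDriverRegistry struct {
	mu      sync.RWMutex
	drivers map[string]string
}

// newClient fails fast when the driver is absent, mirroring the behavior
// behind "failed to create newCsiDriverClient" in the log.
func (r *csiDriverRegistry) newClient(name string) (string, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	ep, ok := r.drivers[name]
	if !ok {
		return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
	}
	return ep, nil
}

// register corresponds to the plugin watcher picking up the driver's
// registration socket (seen near the end of this section).
func (r *csiDriverRegistry) register(name, endpoint string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.drivers[name] = endpoint
}

func main() {
	reg := &csiDriverRegistry{drivers: map[string]string{}}
	if _, err := reg.newClient("kubevirt.io.hostpath-provisioner"); err != nil {
		fmt.Println("mount attempt fails:", err) // the state this log is stuck in
	}
	reg.register("kubevirt.io.hostpath-provisioner", "/var/lib/kubelet/plugins/hostpath.sock") // illustrative endpoint
	if ep, err := reg.newClient("kubevirt.io.hostpath-provisioner"); err == nil {
		fmt.Println("after registration, mounts can proceed via", ep)
	}
}

The ordering is the whole story: every newClient call before register fails with exactly the error seen in these entries, and nothing about the volume itself needs to change for the retries to start succeeding.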
Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.301653 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" podUID="470ff764-d3e6-48a6-aa1b-b4777a1d746f" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused"
Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.349513 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jtc2j"
Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.369946 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:28 crc kubenswrapper[4793]: E0127 20:05:28.370141 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:28.870115002 +0000 UTC m=+154.260368158 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.370764 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:28 crc kubenswrapper[4793]: E0127 20:05:28.371460 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:28.871445396 +0000 UTC m=+154.261698552 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.421980 4793 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-zw89c container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" start-of-body= Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.422044 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" podUID="470ff764-d3e6-48a6-aa1b-b4777a1d746f" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.471125 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:28 crc kubenswrapper[4793]: E0127 20:05:28.471298 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:28.971268027 +0000 UTC m=+154.361521183 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.481085 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-4nvlb" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.523381 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.524290 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.527068 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.528620 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.536009 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.572278 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.572357 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.572403 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 27 20:05:28 crc kubenswrapper[4793]: E0127 20:05:28.572711 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:29.072694736 +0000 UTC m=+154.462947892 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.673418 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:28 crc kubenswrapper[4793]: E0127 20:05:28.673563 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:29.173531655 +0000 UTC m=+154.563784811 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.673611 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.673663 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.673710 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.673848 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 27 20:05:28 crc kubenswrapper[4793]: E0127 20:05:28.673924 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:29.173916652 +0000 UTC m=+154.564169808 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.700958 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.776678 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:28 crc kubenswrapper[4793]: E0127 20:05:28.776977 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:29.27695056 +0000 UTC m=+154.667203716 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.777083 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:28 crc kubenswrapper[4793]: E0127 20:05:28.777491 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:29.277475038 +0000 UTC m=+154.667728194 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.857374 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.891970 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:28 crc kubenswrapper[4793]: E0127 20:05:28.892647 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:29.392629861 +0000 UTC m=+154.782883017 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:28 crc kubenswrapper[4793]: I0127 20:05:28.993756 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:28 crc kubenswrapper[4793]: E0127 20:05:28.994094 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:29.494080211 +0000 UTC m=+154.884333377 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.094469 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:29 crc kubenswrapper[4793]: E0127 20:05:29.094763 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:29.594740637 +0000 UTC m=+154.984993793 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.104931 4793 patch_prober.go:28] interesting pod/router-default-5444994796-9fj82 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 20:05:29 crc kubenswrapper[4793]: [-]has-synced failed: reason withheld Jan 27 20:05:29 crc kubenswrapper[4793]: [+]process-running ok Jan 27 20:05:29 crc kubenswrapper[4793]: healthz check failed Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.105016 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9fj82" podUID="ab3e57d5-0315-4083-b0d8-80af81ba8ea0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.204621 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:29 crc kubenswrapper[4793]: E0127 20:05:29.205036 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:29.705021953 +0000 UTC m=+155.095275109 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.305786 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:29 crc kubenswrapper[4793]: E0127 20:05:29.306179 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:29.806164928 +0000 UTC m=+155.196418084 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.319941 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-95vph" event={"ID":"ce69b998-69b8-46f8-b72b-83aa741479da","Type":"ContainerStarted","Data":"b0ca6512287ef54081b7856ef8dbae886b368bcd742ef609fce095616a420d0a"} Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.391121 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-95vph" podStartSLOduration=14.391102604 podStartE2EDuration="14.391102604s" podCreationTimestamp="2026-01-27 20:05:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:29.389493775 +0000 UTC m=+154.779746951" watchObservedRunningTime="2026-01-27 20:05:29.391102604 +0000 UTC m=+154.781355760" Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.407533 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:29 crc kubenswrapper[4793]: E0127 20:05:29.407864 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:29.907852711 +0000 UTC m=+155.298105867 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.510081 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:29 crc kubenswrapper[4793]: E0127 20:05:29.510598 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.010579554 +0000 UTC m=+155.400832720 (durationBeforeRetry 500ms). 
Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.611741 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:29 crc kubenswrapper[4793]: E0127 20:05:29.612068 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.112057074 +0000 UTC m=+155.502310230 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.630654 4793 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.702884 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.712497 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:29 crc kubenswrapper[4793]: E0127 20:05:29.712665 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.212643518 +0000 UTC m=+155.602896674 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.712913 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:29 crc kubenswrapper[4793]: E0127 20:05:29.713237 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.213227618 +0000 UTC m=+155.603480774 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:29 crc kubenswrapper[4793]: W0127 20:05:29.714377 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod52b3dc81_fdb3_43fb_9b34_e1ac6dfc33da.slice/crio-bfaa7f4c62a2635ecfec52f475219b9408759f417be7bfc09e345ed1ecff0edd WatchSource:0}: Error finding container bfaa7f4c62a2635ecfec52f475219b9408759f417be7bfc09e345ed1ecff0edd: Status 404 returned error can't find the container with id bfaa7f4c62a2635ecfec52f475219b9408759f417be7bfc09e345ed1ecff0edd
Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.814523 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:29 crc kubenswrapper[4793]: E0127 20:05:29.814678 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.314647678 +0000 UTC m=+155.704900834 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.815082 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:29 crc kubenswrapper[4793]: E0127 20:05:29.815430 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.315414641 +0000 UTC m=+155.705667797 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.893454 4793 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-2zxw9 container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.893494 4793 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-2zxw9 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.893506 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" podUID="30eb86e9-4989-4a95-bd07-07ff6a872298" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused"
Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.893571 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" podUID="30eb86e9-4989-4a95-bd07-07ff6a872298" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused"
Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.916113 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:29 crc kubenswrapper[4793]: E0127 20:05:29.916291 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.41626658 +0000 UTC m=+155.806519736 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:29 crc kubenswrapper[4793]: I0127 20:05:29.916504 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:29 crc kubenswrapper[4793]: E0127 20:05:29.916821 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.4168089 +0000 UTC m=+155.807062056 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.017452 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:30 crc kubenswrapper[4793]: E0127 20:05:30.017696 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.517664199 +0000 UTC m=+155.907917365 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.017801 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:30 crc kubenswrapper[4793]: E0127 20:05:30.018114 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.518099216 +0000 UTC m=+155.908352382 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.151655 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:30 crc kubenswrapper[4793]: E0127 20:05:30.151768 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.651748388 +0000 UTC m=+156.042001544 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.151973 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:30 crc kubenswrapper[4793]: E0127 20:05:30.152244 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.652237466 +0000 UTC m=+156.042490612 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.164963 4793 patch_prober.go:28] interesting pod/router-default-5444994796-9fj82 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 20:05:30 crc kubenswrapper[4793]: [-]has-synced failed: reason withheld Jan 27 20:05:30 crc kubenswrapper[4793]: [+]process-running ok Jan 27 20:05:30 crc kubenswrapper[4793]: healthz check failed Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.165045 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9fj82" podUID="ab3e57d5-0315-4083-b0d8-80af81ba8ea0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.252924 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 20:05:30 crc kubenswrapper[4793]: E0127 20:05:30.253092 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.753046144 +0000 UTC m=+156.143299310 (durationBeforeRetry 500ms). 
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.253238 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:30 crc kubenswrapper[4793]: E0127 20:05:30.253651 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.753638734 +0000 UTC m=+156.143891970 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.329063 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da","Type":"ContainerStarted","Data":"c89c71d9d552358ff8f90667326dbd5cc54fdd5c6ce63ef13a465316ae76d23c"}
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.329418 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da","Type":"ContainerStarted","Data":"bfaa7f4c62a2635ecfec52f475219b9408759f417be7bfc09e345ed1ecff0edd"}
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.355145 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:30 crc kubenswrapper[4793]: E0127 20:05:30.355323 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.855294348 +0000 UTC m=+156.245547514 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.355889 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:30 crc kubenswrapper[4793]: E0127 20:05:30.356317 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.856306786 +0000 UTC m=+156.246559942 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.456692 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:30 crc kubenswrapper[4793]: E0127 20:05:30.456965 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.956916431 +0000 UTC m=+156.347169607 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.457020 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:30 crc kubenswrapper[4793]: E0127 20:05:30.457356 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:30.957346138 +0000 UTC m=+156.347599294 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.549944 4793 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-27T20:05:29.630710674Z","Handler":null,"Name":""}
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.557835 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:30 crc kubenswrapper[4793]: E0127 20:05:30.558016 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 20:05:31.057982594 +0000 UTC m=+156.448235750 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.558079 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:30 crc kubenswrapper[4793]: E0127 20:05:30.558413 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 20:05:31.058401041 +0000 UTC m=+156.448654187 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v8ht5" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.576599 4793 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.576643 4793 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.658789 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.671256 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lcdjd"]
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.672622 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lcdjd"
Jan 27 20:05:30 crc kubenswrapper[4793]: W0127 20:05:30.674201 4793 reflector.go:561] object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g": failed to list *v1.Secret: secrets "certified-operators-dockercfg-4rs5g" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-marketplace": no relationship found between node 'crc' and this object
Jan 27 20:05:30 crc kubenswrapper[4793]: E0127 20:05:30.674246 4793 reflector.go:158] "Unhandled Error" err="object-\"openshift-marketplace\"/\"certified-operators-dockercfg-4rs5g\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"certified-operators-dockercfg-4rs5g\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-marketplace\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.675882 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.688154 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lcdjd"]
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.759811 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.769322 4793 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.769383 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.845135 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-n2ntn"]
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.846063 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n2ntn"
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.848036 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.860690 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50ff3901-4109-4f4e-9933-20bccf83d99d-catalog-content\") pod \"certified-operators-lcdjd\" (UID: \"50ff3901-4109-4f4e-9933-20bccf83d99d\") " pod="openshift-marketplace/certified-operators-lcdjd"
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.860770 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50ff3901-4109-4f4e-9933-20bccf83d99d-utilities\") pod \"certified-operators-lcdjd\" (UID: \"50ff3901-4109-4f4e-9933-20bccf83d99d\") " pod="openshift-marketplace/certified-operators-lcdjd"
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.860874 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8qvq\" (UniqueName: \"kubernetes.io/projected/50ff3901-4109-4f4e-9933-20bccf83d99d-kube-api-access-f8qvq\") pod \"certified-operators-lcdjd\" (UID: \"50ff3901-4109-4f4e-9933-20bccf83d99d\") " pod="openshift-marketplace/certified-operators-lcdjd"
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.932420 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n2ntn"]
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.961353 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v8ht5\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5"
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.962191 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50ff3901-4109-4f4e-9933-20bccf83d99d-catalog-content\") pod \"certified-operators-lcdjd\" (UID: \"50ff3901-4109-4f4e-9933-20bccf83d99d\") " pod="openshift-marketplace/certified-operators-lcdjd"
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.962280 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-utilities\") pod \"community-operators-n2ntn\" (UID: \"0bcb229d-7351-4a1f-9a61-8c54a7ee039c\") " pod="openshift-marketplace/community-operators-n2ntn"
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.962363 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgjqm\" (UniqueName: \"kubernetes.io/projected/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-kube-api-access-rgjqm\") pod \"community-operators-n2ntn\" (UID: \"0bcb229d-7351-4a1f-9a61-8c54a7ee039c\") " pod="openshift-marketplace/community-operators-n2ntn"
Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.962395 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50ff3901-4109-4f4e-9933-20bccf83d99d-utilities\") pod \"certified-operators-lcdjd\" (UID: \"50ff3901-4109-4f4e-9933-20bccf83d99d\") " pod="openshift-marketplace/certified-operators-lcdjd"
\"kubernetes.io/empty-dir/50ff3901-4109-4f4e-9933-20bccf83d99d-utilities\") pod \"certified-operators-lcdjd\" (UID: \"50ff3901-4109-4f4e-9933-20bccf83d99d\") " pod="openshift-marketplace/certified-operators-lcdjd" Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.962973 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8qvq\" (UniqueName: \"kubernetes.io/projected/50ff3901-4109-4f4e-9933-20bccf83d99d-kube-api-access-f8qvq\") pod \"certified-operators-lcdjd\" (UID: \"50ff3901-4109-4f4e-9933-20bccf83d99d\") " pod="openshift-marketplace/certified-operators-lcdjd" Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.963025 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-catalog-content\") pod \"community-operators-n2ntn\" (UID: \"0bcb229d-7351-4a1f-9a61-8c54a7ee039c\") " pod="openshift-marketplace/community-operators-n2ntn" Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.962791 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50ff3901-4109-4f4e-9933-20bccf83d99d-catalog-content\") pod \"certified-operators-lcdjd\" (UID: \"50ff3901-4109-4f4e-9933-20bccf83d99d\") " pod="openshift-marketplace/certified-operators-lcdjd" Jan 27 20:05:30 crc kubenswrapper[4793]: I0127 20:05:30.962867 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50ff3901-4109-4f4e-9933-20bccf83d99d-utilities\") pod \"certified-operators-lcdjd\" (UID: \"50ff3901-4109-4f4e-9933-20bccf83d99d\") " pod="openshift-marketplace/certified-operators-lcdjd" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.036649 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qmlkz"] Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.039769 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qmlkz" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.040971 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8qvq\" (UniqueName: \"kubernetes.io/projected/50ff3901-4109-4f4e-9933-20bccf83d99d-kube-api-access-f8qvq\") pod \"certified-operators-lcdjd\" (UID: \"50ff3901-4109-4f4e-9933-20bccf83d99d\") " pod="openshift-marketplace/certified-operators-lcdjd" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.061249 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qmlkz"] Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.063804 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-utilities\") pod \"community-operators-n2ntn\" (UID: \"0bcb229d-7351-4a1f-9a61-8c54a7ee039c\") " pod="openshift-marketplace/community-operators-n2ntn" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.063856 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgjqm\" (UniqueName: \"kubernetes.io/projected/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-kube-api-access-rgjqm\") pod \"community-operators-n2ntn\" (UID: \"0bcb229d-7351-4a1f-9a61-8c54a7ee039c\") " pod="openshift-marketplace/community-operators-n2ntn" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.063954 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-catalog-content\") pod \"community-operators-n2ntn\" (UID: \"0bcb229d-7351-4a1f-9a61-8c54a7ee039c\") " pod="openshift-marketplace/community-operators-n2ntn" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.064455 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-catalog-content\") pod \"community-operators-n2ntn\" (UID: \"0bcb229d-7351-4a1f-9a61-8c54a7ee039c\") " pod="openshift-marketplace/community-operators-n2ntn" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.064598 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-utilities\") pod \"community-operators-n2ntn\" (UID: \"0bcb229d-7351-4a1f-9a61-8c54a7ee039c\") " pod="openshift-marketplace/community-operators-n2ntn" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.101434 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgjqm\" (UniqueName: \"kubernetes.io/projected/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-kube-api-access-rgjqm\") pod \"community-operators-n2ntn\" (UID: \"0bcb229d-7351-4a1f-9a61-8c54a7ee039c\") " pod="openshift-marketplace/community-operators-n2ntn" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.115636 4793 patch_prober.go:28] interesting pod/router-default-5444994796-9fj82 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 20:05:31 crc kubenswrapper[4793]: [-]has-synced failed: reason withheld Jan 27 20:05:31 crc kubenswrapper[4793]: [+]process-running ok Jan 27 20:05:31 crc kubenswrapper[4793]: healthz check failed Jan 27 20:05:31 crc 
kubenswrapper[4793]: I0127 20:05:31.115718 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9fj82" podUID="ab3e57d5-0315-4083-b0d8-80af81ba8ea0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.149945 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.166519 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n2ntn" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.173703 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-catalog-content\") pod \"certified-operators-qmlkz\" (UID: \"345c96d4-a84a-4d09-9d94-f68e4c3bff9b\") " pod="openshift-marketplace/certified-operators-qmlkz" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.173801 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sk97f\" (UniqueName: \"kubernetes.io/projected/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-kube-api-access-sk97f\") pod \"certified-operators-qmlkz\" (UID: \"345c96d4-a84a-4d09-9d94-f68e4c3bff9b\") " pod="openshift-marketplace/certified-operators-qmlkz" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.173892 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-utilities\") pod \"certified-operators-qmlkz\" (UID: \"345c96d4-a84a-4d09-9d94-f68e4c3bff9b\") " pod="openshift-marketplace/certified-operators-qmlkz" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.216373 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jh9bl"] Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.225289 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jh9bl" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.227362 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jh9bl"] Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.275121 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18834635-b900-480e-844b-4c075b169d4a-utilities\") pod \"community-operators-jh9bl\" (UID: \"18834635-b900-480e-844b-4c075b169d4a\") " pod="openshift-marketplace/community-operators-jh9bl" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.275162 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-catalog-content\") pod \"certified-operators-qmlkz\" (UID: \"345c96d4-a84a-4d09-9d94-f68e4c3bff9b\") " pod="openshift-marketplace/certified-operators-qmlkz" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.275183 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sk97f\" (UniqueName: \"kubernetes.io/projected/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-kube-api-access-sk97f\") pod \"certified-operators-qmlkz\" (UID: \"345c96d4-a84a-4d09-9d94-f68e4c3bff9b\") " pod="openshift-marketplace/certified-operators-qmlkz" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.275214 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9btv\" (UniqueName: \"kubernetes.io/projected/18834635-b900-480e-844b-4c075b169d4a-kube-api-access-n9btv\") pod \"community-operators-jh9bl\" (UID: \"18834635-b900-480e-844b-4c075b169d4a\") " pod="openshift-marketplace/community-operators-jh9bl" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.275252 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18834635-b900-480e-844b-4c075b169d4a-catalog-content\") pod \"community-operators-jh9bl\" (UID: \"18834635-b900-480e-844b-4c075b169d4a\") " pod="openshift-marketplace/community-operators-jh9bl" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.275289 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-utilities\") pod \"certified-operators-qmlkz\" (UID: \"345c96d4-a84a-4d09-9d94-f68e4c3bff9b\") " pod="openshift-marketplace/certified-operators-qmlkz" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.275902 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-utilities\") pod \"certified-operators-qmlkz\" (UID: \"345c96d4-a84a-4d09-9d94-f68e4c3bff9b\") " pod="openshift-marketplace/certified-operators-qmlkz" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.276159 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-catalog-content\") pod \"certified-operators-qmlkz\" (UID: \"345c96d4-a84a-4d09-9d94-f68e4c3bff9b\") " pod="openshift-marketplace/certified-operators-qmlkz" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.309670 4793 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-sk97f\" (UniqueName: \"kubernetes.io/projected/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-kube-api-access-sk97f\") pod \"certified-operators-qmlkz\" (UID: \"345c96d4-a84a-4d09-9d94-f68e4c3bff9b\") " pod="openshift-marketplace/certified-operators-qmlkz" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.309711 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.335788 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-v8r5p" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.467183 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18834635-b900-480e-844b-4c075b169d4a-catalog-content\") pod \"community-operators-jh9bl\" (UID: \"18834635-b900-480e-844b-4c075b169d4a\") " pod="openshift-marketplace/community-operators-jh9bl" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.467291 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18834635-b900-480e-844b-4c075b169d4a-utilities\") pod \"community-operators-jh9bl\" (UID: \"18834635-b900-480e-844b-4c075b169d4a\") " pod="openshift-marketplace/community-operators-jh9bl" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.467401 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9btv\" (UniqueName: \"kubernetes.io/projected/18834635-b900-480e-844b-4c075b169d4a-kube-api-access-n9btv\") pod \"community-operators-jh9bl\" (UID: \"18834635-b900-480e-844b-4c075b169d4a\") " pod="openshift-marketplace/community-operators-jh9bl" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.468645 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18834635-b900-480e-844b-4c075b169d4a-catalog-content\") pod \"community-operators-jh9bl\" (UID: \"18834635-b900-480e-844b-4c075b169d4a\") " pod="openshift-marketplace/community-operators-jh9bl" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.469488 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18834635-b900-480e-844b-4c075b169d4a-utilities\") pod \"community-operators-jh9bl\" (UID: \"18834635-b900-480e-844b-4c075b169d4a\") " pod="openshift-marketplace/community-operators-jh9bl" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.540372 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9btv\" (UniqueName: \"kubernetes.io/projected/18834635-b900-480e-844b-4c075b169d4a-kube-api-access-n9btv\") pod \"community-operators-jh9bl\" (UID: \"18834635-b900-480e-844b-4c075b169d4a\") " pod="openshift-marketplace/community-operators-jh9bl" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.540810 4793 generic.go:334] "Generic (PLEG): container finished" podID="52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da" containerID="c89c71d9d552358ff8f90667326dbd5cc54fdd5c6ce63ef13a465316ae76d23c" exitCode=0 Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.541679 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" 
event={"ID":"52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da","Type":"ContainerDied","Data":"c89c71d9d552358ff8f90667326dbd5cc54fdd5c6ce63ef13a465316ae76d23c"} Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.585808 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jh9bl" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.729386 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.731611 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qmlkz" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.732306 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lcdjd" Jan 27 20:05:31 crc kubenswrapper[4793]: I0127 20:05:31.828532 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.111861 4793 patch_prober.go:28] interesting pod/router-default-5444994796-9fj82 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 20:05:32 crc kubenswrapper[4793]: [-]has-synced failed: reason withheld Jan 27 20:05:32 crc kubenswrapper[4793]: [+]process-running ok Jan 27 20:05:32 crc kubenswrapper[4793]: healthz check failed Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.111919 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9fj82" podUID="ab3e57d5-0315-4083-b0d8-80af81ba8ea0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.140328 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n2ntn"] Jan 27 20:05:32 crc kubenswrapper[4793]: W0127 20:05:32.168774 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0bcb229d_7351_4a1f_9a61_8c54a7ee039c.slice/crio-79b699cbe7ef984c47848ac6f9549a3f9058df7f4fe8aa1da65e4c84b10df874 WatchSource:0}: Error finding container 79b699cbe7ef984c47848ac6f9549a3f9058df7f4fe8aa1da65e4c84b10df874: Status 404 returned error can't find the container with id 79b699cbe7ef984c47848ac6f9549a3f9058df7f4fe8aa1da65e4c84b10df874 Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.258179 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.267265 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.270577 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.276558 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.277175 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.402174 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2b8d1e1f-e58d-4080-acdb-0e762f74e141-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"2b8d1e1f-e58d-4080-acdb-0e762f74e141\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.402245 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2b8d1e1f-e58d-4080-acdb-0e762f74e141-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"2b8d1e1f-e58d-4080-acdb-0e762f74e141\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.428400 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jh9bl"] Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.503485 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2b8d1e1f-e58d-4080-acdb-0e762f74e141-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"2b8d1e1f-e58d-4080-acdb-0e762f74e141\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.503597 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2b8d1e1f-e58d-4080-acdb-0e762f74e141-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"2b8d1e1f-e58d-4080-acdb-0e762f74e141\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.503990 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2b8d1e1f-e58d-4080-acdb-0e762f74e141-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"2b8d1e1f-e58d-4080-acdb-0e762f74e141\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.564586 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v8ht5"] Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.581071 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2b8d1e1f-e58d-4080-acdb-0e762f74e141-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"2b8d1e1f-e58d-4080-acdb-0e762f74e141\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.603104 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jh9bl" 
event={"ID":"18834635-b900-480e-844b-4c075b169d4a","Type":"ContainerStarted","Data":"556990d3d93a4567156f437f38e11621b8d93e56a13efb41a7fc1a5b1ca6aec3"} Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.628820 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n2ntn" event={"ID":"0bcb229d-7351-4a1f-9a61-8c54a7ee039c","Type":"ContainerStarted","Data":"804ecdda7805934fd8effe9da766db00a3bb7e762ecb93afa307b9bae38d0eae"} Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.628878 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n2ntn" event={"ID":"0bcb229d-7351-4a1f-9a61-8c54a7ee039c","Type":"ContainerStarted","Data":"79b699cbe7ef984c47848ac6f9549a3f9058df7f4fe8aa1da65e4c84b10df874"} Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.716182 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.719628 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tcslq"] Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.724351 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.738407 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tcslq" Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.745195 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lcdjd"] Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.786422 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.826332 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tcslq"] Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.833806 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.850359 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4j4j" Jan 27 20:05:32 crc kubenswrapper[4793]: I0127 20:05:32.912762 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2zxw9" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.044566 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hb6cz\" (UniqueName: \"kubernetes.io/projected/5493a5b8-666b-4e96-8912-e8ddc28327fe-kube-api-access-hb6cz\") pod \"redhat-marketplace-tcslq\" (UID: \"5493a5b8-666b-4e96-8912-e8ddc28327fe\") " pod="openshift-marketplace/redhat-marketplace-tcslq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.044694 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5493a5b8-666b-4e96-8912-e8ddc28327fe-utilities\") pod \"redhat-marketplace-tcslq\" (UID: \"5493a5b8-666b-4e96-8912-e8ddc28327fe\") " pod="openshift-marketplace/redhat-marketplace-tcslq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.044755 4793 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5493a5b8-666b-4e96-8912-e8ddc28327fe-catalog-content\") pod \"redhat-marketplace-tcslq\" (UID: \"5493a5b8-666b-4e96-8912-e8ddc28327fe\") " pod="openshift-marketplace/redhat-marketplace-tcslq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.099814 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-sn9b2" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.103007 4793 patch_prober.go:28] interesting pod/router-default-5444994796-9fj82 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 20:05:33 crc kubenswrapper[4793]: [-]has-synced failed: reason withheld Jan 27 20:05:33 crc kubenswrapper[4793]: [+]process-running ok Jan 27 20:05:33 crc kubenswrapper[4793]: healthz check failed Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.103033 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ch6xq"] Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.103044 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9fj82" podUID="ab3e57d5-0315-4083-b0d8-80af81ba8ea0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.115953 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ch6xq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.151795 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ch6xq"] Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.152248 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5493a5b8-666b-4e96-8912-e8ddc28327fe-utilities\") pod \"redhat-marketplace-tcslq\" (UID: \"5493a5b8-666b-4e96-8912-e8ddc28327fe\") " pod="openshift-marketplace/redhat-marketplace-tcslq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.152293 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5493a5b8-666b-4e96-8912-e8ddc28327fe-catalog-content\") pod \"redhat-marketplace-tcslq\" (UID: \"5493a5b8-666b-4e96-8912-e8ddc28327fe\") " pod="openshift-marketplace/redhat-marketplace-tcslq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.152329 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8mtz\" (UniqueName: \"kubernetes.io/projected/1809bb2d-a0ed-4679-ab68-27db1963e044-kube-api-access-l8mtz\") pod \"redhat-marketplace-ch6xq\" (UID: \"1809bb2d-a0ed-4679-ab68-27db1963e044\") " pod="openshift-marketplace/redhat-marketplace-ch6xq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.152407 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1809bb2d-a0ed-4679-ab68-27db1963e044-catalog-content\") pod \"redhat-marketplace-ch6xq\" (UID: \"1809bb2d-a0ed-4679-ab68-27db1963e044\") " pod="openshift-marketplace/redhat-marketplace-ch6xq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 
20:05:33.152426 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hb6cz\" (UniqueName: \"kubernetes.io/projected/5493a5b8-666b-4e96-8912-e8ddc28327fe-kube-api-access-hb6cz\") pod \"redhat-marketplace-tcslq\" (UID: \"5493a5b8-666b-4e96-8912-e8ddc28327fe\") " pod="openshift-marketplace/redhat-marketplace-tcslq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.152442 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1809bb2d-a0ed-4679-ab68-27db1963e044-utilities\") pod \"redhat-marketplace-ch6xq\" (UID: \"1809bb2d-a0ed-4679-ab68-27db1963e044\") " pod="openshift-marketplace/redhat-marketplace-ch6xq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.154129 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5493a5b8-666b-4e96-8912-e8ddc28327fe-catalog-content\") pod \"redhat-marketplace-tcslq\" (UID: \"5493a5b8-666b-4e96-8912-e8ddc28327fe\") " pod="openshift-marketplace/redhat-marketplace-tcslq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.154345 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5493a5b8-666b-4e96-8912-e8ddc28327fe-utilities\") pod \"redhat-marketplace-tcslq\" (UID: \"5493a5b8-666b-4e96-8912-e8ddc28327fe\") " pod="openshift-marketplace/redhat-marketplace-tcslq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.217694 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qmlkz"] Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.221439 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hb6cz\" (UniqueName: \"kubernetes.io/projected/5493a5b8-666b-4e96-8912-e8ddc28327fe-kube-api-access-hb6cz\") pod \"redhat-marketplace-tcslq\" (UID: \"5493a5b8-666b-4e96-8912-e8ddc28327fe\") " pod="openshift-marketplace/redhat-marketplace-tcslq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.253689 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8mtz\" (UniqueName: \"kubernetes.io/projected/1809bb2d-a0ed-4679-ab68-27db1963e044-kube-api-access-l8mtz\") pod \"redhat-marketplace-ch6xq\" (UID: \"1809bb2d-a0ed-4679-ab68-27db1963e044\") " pod="openshift-marketplace/redhat-marketplace-ch6xq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.253782 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1809bb2d-a0ed-4679-ab68-27db1963e044-catalog-content\") pod \"redhat-marketplace-ch6xq\" (UID: \"1809bb2d-a0ed-4679-ab68-27db1963e044\") " pod="openshift-marketplace/redhat-marketplace-ch6xq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.253813 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1809bb2d-a0ed-4679-ab68-27db1963e044-utilities\") pod \"redhat-marketplace-ch6xq\" (UID: \"1809bb2d-a0ed-4679-ab68-27db1963e044\") " pod="openshift-marketplace/redhat-marketplace-ch6xq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.256394 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1809bb2d-a0ed-4679-ab68-27db1963e044-utilities\") pod \"redhat-marketplace-ch6xq\" 
(UID: \"1809bb2d-a0ed-4679-ab68-27db1963e044\") " pod="openshift-marketplace/redhat-marketplace-ch6xq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.257023 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1809bb2d-a0ed-4679-ab68-27db1963e044-catalog-content\") pod \"redhat-marketplace-ch6xq\" (UID: \"1809bb2d-a0ed-4679-ab68-27db1963e044\") " pod="openshift-marketplace/redhat-marketplace-ch6xq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.314437 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8mtz\" (UniqueName: \"kubernetes.io/projected/1809bb2d-a0ed-4679-ab68-27db1963e044-kube-api-access-l8mtz\") pod \"redhat-marketplace-ch6xq\" (UID: \"1809bb2d-a0ed-4679-ab68-27db1963e044\") " pod="openshift-marketplace/redhat-marketplace-ch6xq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.464693 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ch6xq" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.470854 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tcslq" Jan 27 20:05:33 crc kubenswrapper[4793]: W0127 20:05:33.633050 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod345c96d4_a84a_4d09_9d94_f68e4c3bff9b.slice/crio-3ac637c5af0402a1b0b4f7d7b0ca255e3a2b5a3273be466c597b433ff6844572 WatchSource:0}: Error finding container 3ac637c5af0402a1b0b4f7d7b0ca255e3a2b5a3273be466c597b433ff6844572: Status 404 returned error can't find the container with id 3ac637c5af0402a1b0b4f7d7b0ca255e3a2b5a3273be466c597b433ff6844572 Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.647651 4793 generic.go:334] "Generic (PLEG): container finished" podID="11588ab1-689a-4227-a887-a57b945807a2" containerID="59a65e3215a81d98738e1626c562a1b04b0e491558cf2fe85839854ff5c573b7" exitCode=0 Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.647748 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" event={"ID":"11588ab1-689a-4227-a887-a57b945807a2","Type":"ContainerDied","Data":"59a65e3215a81d98738e1626c562a1b04b0e491558cf2fe85839854ff5c573b7"} Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.682301 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" event={"ID":"28a34749-2dfc-4164-a7b9-016f47e098cd","Type":"ContainerStarted","Data":"b9c79b5d9e9adf0088ece156abe6738b1c5d30faca94d115bdd7d5ae3dedcb34"} Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.683033 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" event={"ID":"28a34749-2dfc-4164-a7b9-016f47e098cd","Type":"ContainerStarted","Data":"f40a9da6d2ca279328efe414b2196f624a546c3940cc690e62d940c9c65e0bcf"} Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.683757 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.703221 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lcdjd" 
event={"ID":"50ff3901-4109-4f4e-9933-20bccf83d99d","Type":"ContainerStarted","Data":"3a4a5e79ec2ee1178188dec61e4fb57a1b40f2ceff6bd8545af8e26ed8355184"} Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.703273 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lcdjd" event={"ID":"50ff3901-4109-4f4e-9933-20bccf83d99d","Type":"ContainerStarted","Data":"424fac4d77a4a63c61d10b875ca890f8eb080fba4101b18ec16eca039ec4d6f7"} Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.730037 4793 generic.go:334] "Generic (PLEG): container finished" podID="18834635-b900-480e-844b-4c075b169d4a" containerID="e28895808e9c55135c7186181cb778365c77de4390cf75b859ae2635db4376fe" exitCode=0 Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.730102 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jh9bl" event={"ID":"18834635-b900-480e-844b-4c075b169d4a","Type":"ContainerDied","Data":"e28895808e9c55135c7186181cb778365c77de4390cf75b859ae2635db4376fe"} Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.758391 4793 generic.go:334] "Generic (PLEG): container finished" podID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" containerID="804ecdda7805934fd8effe9da766db00a3bb7e762ecb93afa307b9bae38d0eae" exitCode=0 Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.759289 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n2ntn" event={"ID":"0bcb229d-7351-4a1f-9a61-8c54a7ee039c","Type":"ContainerDied","Data":"804ecdda7805934fd8effe9da766db00a3bb7e762ecb93afa307b9bae38d0eae"} Jan 27 20:05:33 crc kubenswrapper[4793]: I0127 20:05:33.780913 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" podStartSLOduration=136.780893104 podStartE2EDuration="2m16.780893104s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:33.778213326 +0000 UTC m=+159.168466482" watchObservedRunningTime="2026-01-27 20:05:33.780893104 +0000 UTC m=+159.171146260" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.058322 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-j7v8p"] Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.060183 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j7v8p" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.069194 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.073389 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j7v8p"] Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.105058 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-9fj82" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.107132 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.115083 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-9fj82" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.201083 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da-kube-api-access\") pod \"52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da\" (UID: \"52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da\") " Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.201496 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da-kubelet-dir\") pod \"52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da\" (UID: \"52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da\") " Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.201769 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef02211f-9add-4072-aa2d-4df47b879c0d-utilities\") pod \"redhat-operators-j7v8p\" (UID: \"ef02211f-9add-4072-aa2d-4df47b879c0d\") " pod="openshift-marketplace/redhat-operators-j7v8p" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.201799 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef02211f-9add-4072-aa2d-4df47b879c0d-catalog-content\") pod \"redhat-operators-j7v8p\" (UID: \"ef02211f-9add-4072-aa2d-4df47b879c0d\") " pod="openshift-marketplace/redhat-operators-j7v8p" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.201856 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x52cs\" (UniqueName: \"kubernetes.io/projected/ef02211f-9add-4072-aa2d-4df47b879c0d-kube-api-access-x52cs\") pod \"redhat-operators-j7v8p\" (UID: \"ef02211f-9add-4072-aa2d-4df47b879c0d\") " pod="openshift-marketplace/redhat-operators-j7v8p" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.203847 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da" (UID: "52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.214792 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da" (UID: "52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.303559 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef02211f-9add-4072-aa2d-4df47b879c0d-utilities\") pod \"redhat-operators-j7v8p\" (UID: \"ef02211f-9add-4072-aa2d-4df47b879c0d\") " pod="openshift-marketplace/redhat-operators-j7v8p" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.303604 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef02211f-9add-4072-aa2d-4df47b879c0d-catalog-content\") pod \"redhat-operators-j7v8p\" (UID: \"ef02211f-9add-4072-aa2d-4df47b879c0d\") " pod="openshift-marketplace/redhat-operators-j7v8p" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.303649 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x52cs\" (UniqueName: \"kubernetes.io/projected/ef02211f-9add-4072-aa2d-4df47b879c0d-kube-api-access-x52cs\") pod \"redhat-operators-j7v8p\" (UID: \"ef02211f-9add-4072-aa2d-4df47b879c0d\") " pod="openshift-marketplace/redhat-operators-j7v8p" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.303742 4793 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.303760 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.304678 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef02211f-9add-4072-aa2d-4df47b879c0d-utilities\") pod \"redhat-operators-j7v8p\" (UID: \"ef02211f-9add-4072-aa2d-4df47b879c0d\") " pod="openshift-marketplace/redhat-operators-j7v8p" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.304993 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef02211f-9add-4072-aa2d-4df47b879c0d-catalog-content\") pod \"redhat-operators-j7v8p\" (UID: \"ef02211f-9add-4072-aa2d-4df47b879c0d\") " pod="openshift-marketplace/redhat-operators-j7v8p" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.463784 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x52cs\" (UniqueName: \"kubernetes.io/projected/ef02211f-9add-4072-aa2d-4df47b879c0d-kube-api-access-x52cs\") pod \"redhat-operators-j7v8p\" (UID: \"ef02211f-9add-4072-aa2d-4df47b879c0d\") " pod="openshift-marketplace/redhat-operators-j7v8p" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.494526 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2lmqb"] Jan 27 20:05:34 crc kubenswrapper[4793]: E0127 20:05:34.494810 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da" containerName="pruner" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.494827 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da" containerName="pruner" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.494949 4793 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da" containerName="pruner" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.495865 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2lmqb" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.534814 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.580519 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2lmqb"] Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.610597 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7506fff-3cb5-42dd-80c3-203b1354c70d-catalog-content\") pod \"redhat-operators-2lmqb\" (UID: \"f7506fff-3cb5-42dd-80c3-203b1354c70d\") " pod="openshift-marketplace/redhat-operators-2lmqb" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.610656 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vx95k\" (UniqueName: \"kubernetes.io/projected/f7506fff-3cb5-42dd-80c3-203b1354c70d-kube-api-access-vx95k\") pod \"redhat-operators-2lmqb\" (UID: \"f7506fff-3cb5-42dd-80c3-203b1354c70d\") " pod="openshift-marketplace/redhat-operators-2lmqb" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.610739 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7506fff-3cb5-42dd-80c3-203b1354c70d-utilities\") pod \"redhat-operators-2lmqb\" (UID: \"f7506fff-3cb5-42dd-80c3-203b1354c70d\") " pod="openshift-marketplace/redhat-operators-2lmqb" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.701796 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-j7v8p" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.713249 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7506fff-3cb5-42dd-80c3-203b1354c70d-utilities\") pod \"redhat-operators-2lmqb\" (UID: \"f7506fff-3cb5-42dd-80c3-203b1354c70d\") " pod="openshift-marketplace/redhat-operators-2lmqb" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.713336 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7506fff-3cb5-42dd-80c3-203b1354c70d-catalog-content\") pod \"redhat-operators-2lmqb\" (UID: \"f7506fff-3cb5-42dd-80c3-203b1354c70d\") " pod="openshift-marketplace/redhat-operators-2lmqb" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.713363 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vx95k\" (UniqueName: \"kubernetes.io/projected/f7506fff-3cb5-42dd-80c3-203b1354c70d-kube-api-access-vx95k\") pod \"redhat-operators-2lmqb\" (UID: \"f7506fff-3cb5-42dd-80c3-203b1354c70d\") " pod="openshift-marketplace/redhat-operators-2lmqb" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.713883 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7506fff-3cb5-42dd-80c3-203b1354c70d-catalog-content\") pod \"redhat-operators-2lmqb\" (UID: \"f7506fff-3cb5-42dd-80c3-203b1354c70d\") " pod="openshift-marketplace/redhat-operators-2lmqb" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.714299 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7506fff-3cb5-42dd-80c3-203b1354c70d-utilities\") pod \"redhat-operators-2lmqb\" (UID: \"f7506fff-3cb5-42dd-80c3-203b1354c70d\") " pod="openshift-marketplace/redhat-operators-2lmqb" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.764206 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vx95k\" (UniqueName: \"kubernetes.io/projected/f7506fff-3cb5-42dd-80c3-203b1354c70d-kube-api-access-vx95k\") pod \"redhat-operators-2lmqb\" (UID: \"f7506fff-3cb5-42dd-80c3-203b1354c70d\") " pod="openshift-marketplace/redhat-operators-2lmqb" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.801089 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tcslq"] Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.884447 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ch6xq"] Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.894121 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2lmqb" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.903142 4793 generic.go:334] "Generic (PLEG): container finished" podID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" containerID="5e0dfa79b4dc30a7a2e7ff633cffe59d4e9160af5337cf40ca8c9f886d06bd46" exitCode=0 Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.903351 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmlkz" event={"ID":"345c96d4-a84a-4d09-9d94-f68e4c3bff9b","Type":"ContainerDied","Data":"5e0dfa79b4dc30a7a2e7ff633cffe59d4e9160af5337cf40ca8c9f886d06bd46"} Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.903635 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmlkz" event={"ID":"345c96d4-a84a-4d09-9d94-f68e4c3bff9b","Type":"ContainerStarted","Data":"3ac637c5af0402a1b0b4f7d7b0ca255e3a2b5a3273be466c597b433ff6844572"} Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.909620 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"2b8d1e1f-e58d-4080-acdb-0e762f74e141","Type":"ContainerStarted","Data":"6abac4862794fd63b5807d820303cf2db141c8a47c01c7c4548218b896a3732f"} Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.914510 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"52b3dc81-fdb3-43fb-9b34-e1ac6dfc33da","Type":"ContainerDied","Data":"bfaa7f4c62a2635ecfec52f475219b9408759f417be7bfc09e345ed1ecff0edd"} Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.914577 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bfaa7f4c62a2635ecfec52f475219b9408759f417be7bfc09e345ed1ecff0edd" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.914651 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.940799 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lcdjd" event={"ID":"50ff3901-4109-4f4e-9933-20bccf83d99d","Type":"ContainerDied","Data":"3a4a5e79ec2ee1178188dec61e4fb57a1b40f2ceff6bd8545af8e26ed8355184"} Jan 27 20:05:34 crc kubenswrapper[4793]: I0127 20:05:34.941658 4793 generic.go:334] "Generic (PLEG): container finished" podID="50ff3901-4109-4f4e-9933-20bccf83d99d" containerID="3a4a5e79ec2ee1178188dec61e4fb57a1b40f2ceff6bd8545af8e26ed8355184" exitCode=0 Jan 27 20:05:35 crc kubenswrapper[4793]: I0127 20:05:35.960613 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" Jan 27 20:05:35 crc kubenswrapper[4793]: I0127 20:05:35.974039 4793 generic.go:334] "Generic (PLEG): container finished" podID="1809bb2d-a0ed-4679-ab68-27db1963e044" containerID="2a7eae65edfe595dd52be60556b19f4cd77ce63d5bc822c657ca449057691a3e" exitCode=0 Jan 27 20:05:35 crc kubenswrapper[4793]: I0127 20:05:35.976319 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ch6xq" event={"ID":"1809bb2d-a0ed-4679-ab68-27db1963e044","Type":"ContainerDied","Data":"2a7eae65edfe595dd52be60556b19f4cd77ce63d5bc822c657ca449057691a3e"} Jan 27 20:05:35 crc kubenswrapper[4793]: I0127 20:05:35.976352 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ch6xq" event={"ID":"1809bb2d-a0ed-4679-ab68-27db1963e044","Type":"ContainerStarted","Data":"050dabdd8984fbf1c4262c9fedf51f0f23cd23563e289d070b4d335afc59ff7d"} Jan 27 20:05:35 crc kubenswrapper[4793]: I0127 20:05:35.981506 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" event={"ID":"11588ab1-689a-4227-a887-a57b945807a2","Type":"ContainerDied","Data":"4cb04f6a3511701cad63b00ae4954b496178562941482f0f037c87d85d19d4e2"} Jan 27 20:05:35 crc kubenswrapper[4793]: I0127 20:05:35.981540 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4cb04f6a3511701cad63b00ae4954b496178562941482f0f037c87d85d19d4e2" Jan 27 20:05:35 crc kubenswrapper[4793]: I0127 20:05:35.981651 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4" Jan 27 20:05:36 crc kubenswrapper[4793]: I0127 20:05:36.000269 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tcslq" event={"ID":"5493a5b8-666b-4e96-8912-e8ddc28327fe","Type":"ContainerStarted","Data":"819eb6fbed1ba503d50f7093e71b70de43dd474ac1257680a63f552f449911f7"} Jan 27 20:05:36 crc kubenswrapper[4793]: I0127 20:05:36.000306 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tcslq" event={"ID":"5493a5b8-666b-4e96-8912-e8ddc28327fe","Type":"ContainerStarted","Data":"d16c4178252403efd4a7f539e92d393fd56d092d6bd43be70405a65fbe6af0b6"} Jan 27 20:05:36 crc kubenswrapper[4793]: I0127 20:05:36.082779 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2lmqb"] Jan 27 20:05:36 crc kubenswrapper[4793]: I0127 20:05:36.163432 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/11588ab1-689a-4227-a887-a57b945807a2-secret-volume\") pod \"11588ab1-689a-4227-a887-a57b945807a2\" (UID: \"11588ab1-689a-4227-a887-a57b945807a2\") " Jan 27 20:05:36 crc kubenswrapper[4793]: I0127 20:05:36.163621 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/11588ab1-689a-4227-a887-a57b945807a2-config-volume\") pod \"11588ab1-689a-4227-a887-a57b945807a2\" (UID: \"11588ab1-689a-4227-a887-a57b945807a2\") " Jan 27 20:05:36 crc kubenswrapper[4793]: I0127 20:05:36.163666 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kgkjl\" (UniqueName: 
\"kubernetes.io/projected/11588ab1-689a-4227-a887-a57b945807a2-kube-api-access-kgkjl\") pod \"11588ab1-689a-4227-a887-a57b945807a2\" (UID: \"11588ab1-689a-4227-a887-a57b945807a2\") " Jan 27 20:05:36 crc kubenswrapper[4793]: I0127 20:05:36.168269 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11588ab1-689a-4227-a887-a57b945807a2-config-volume" (OuterVolumeSpecName: "config-volume") pod "11588ab1-689a-4227-a887-a57b945807a2" (UID: "11588ab1-689a-4227-a887-a57b945807a2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:05:36 crc kubenswrapper[4793]: I0127 20:05:36.187847 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11588ab1-689a-4227-a887-a57b945807a2-kube-api-access-kgkjl" (OuterVolumeSpecName: "kube-api-access-kgkjl") pod "11588ab1-689a-4227-a887-a57b945807a2" (UID: "11588ab1-689a-4227-a887-a57b945807a2"). InnerVolumeSpecName "kube-api-access-kgkjl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:05:36 crc kubenswrapper[4793]: I0127 20:05:36.189594 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11588ab1-689a-4227-a887-a57b945807a2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "11588ab1-689a-4227-a887-a57b945807a2" (UID: "11588ab1-689a-4227-a887-a57b945807a2"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:05:36 crc kubenswrapper[4793]: I0127 20:05:36.272488 4793 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/11588ab1-689a-4227-a887-a57b945807a2-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 20:05:36 crc kubenswrapper[4793]: I0127 20:05:36.272808 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kgkjl\" (UniqueName: \"kubernetes.io/projected/11588ab1-689a-4227-a887-a57b945807a2-kube-api-access-kgkjl\") on node \"crc\" DevicePath \"\"" Jan 27 20:05:36 crc kubenswrapper[4793]: I0127 20:05:36.272928 4793 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/11588ab1-689a-4227-a887-a57b945807a2-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 20:05:36 crc kubenswrapper[4793]: I0127 20:05:36.334273 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j7v8p"] Jan 27 20:05:37 crc kubenswrapper[4793]: I0127 20:05:37.130892 4793 generic.go:334] "Generic (PLEG): container finished" podID="ef02211f-9add-4072-aa2d-4df47b879c0d" containerID="dc29d462435bebb6bf49c488f5fa6a0fc79502bf1f2e01c6980531de0d7f2d27" exitCode=0 Jan 27 20:05:37 crc kubenswrapper[4793]: I0127 20:05:37.131076 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j7v8p" event={"ID":"ef02211f-9add-4072-aa2d-4df47b879c0d","Type":"ContainerDied","Data":"dc29d462435bebb6bf49c488f5fa6a0fc79502bf1f2e01c6980531de0d7f2d27"} Jan 27 20:05:37 crc kubenswrapper[4793]: I0127 20:05:37.131413 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j7v8p" event={"ID":"ef02211f-9add-4072-aa2d-4df47b879c0d","Type":"ContainerStarted","Data":"f605db4bf9b8b3b8e2f89054255aa5fb8e480467fc9338ea3364a45c8e795201"} Jan 27 20:05:37 crc kubenswrapper[4793]: I0127 20:05:37.134961 4793 generic.go:334] "Generic (PLEG): container finished" 
podID="f7506fff-3cb5-42dd-80c3-203b1354c70d" containerID="0567c91e1e506036e221f2253199d87f41090c09f93c5a922498a1ce3b562f23" exitCode=0 Jan 27 20:05:37 crc kubenswrapper[4793]: I0127 20:05:37.135047 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2lmqb" event={"ID":"f7506fff-3cb5-42dd-80c3-203b1354c70d","Type":"ContainerDied","Data":"0567c91e1e506036e221f2253199d87f41090c09f93c5a922498a1ce3b562f23"} Jan 27 20:05:37 crc kubenswrapper[4793]: I0127 20:05:37.135093 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2lmqb" event={"ID":"f7506fff-3cb5-42dd-80c3-203b1354c70d","Type":"ContainerStarted","Data":"e911b40cfff3c4b8e0fd34892d2b16467bb3be27adf485dec1c0585c2d9f4938"} Jan 27 20:05:37 crc kubenswrapper[4793]: I0127 20:05:37.142477 4793 generic.go:334] "Generic (PLEG): container finished" podID="5493a5b8-666b-4e96-8912-e8ddc28327fe" containerID="819eb6fbed1ba503d50f7093e71b70de43dd474ac1257680a63f552f449911f7" exitCode=0 Jan 27 20:05:37 crc kubenswrapper[4793]: I0127 20:05:37.142642 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tcslq" event={"ID":"5493a5b8-666b-4e96-8912-e8ddc28327fe","Type":"ContainerDied","Data":"819eb6fbed1ba503d50f7093e71b70de43dd474ac1257680a63f552f449911f7"} Jan 27 20:05:37 crc kubenswrapper[4793]: I0127 20:05:37.145907 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"2b8d1e1f-e58d-4080-acdb-0e762f74e141","Type":"ContainerStarted","Data":"78ef46a49ef5c021826a15cb00a358137dbe289f9cca20d3d6bad44dd5bda047"} Jan 27 20:05:37 crc kubenswrapper[4793]: I0127 20:05:37.636504 4793 patch_prober.go:28] interesting pod/console-f9d7485db-slbcq container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 27 20:05:37 crc kubenswrapper[4793]: I0127 20:05:37.636572 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-slbcq" podUID="f8ceccd4-2d2a-45c9-a255-2d6763b7d150" containerName="console" probeResult="failure" output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 27 20:05:37 crc kubenswrapper[4793]: I0127 20:05:37.832422 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:05:37 crc kubenswrapper[4793]: I0127 20:05:37.832519 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:05:37 crc kubenswrapper[4793]: I0127 20:05:37.832606 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 20:05:37 crc kubenswrapper[4793]: I0127 20:05:37.832523 4793 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 20:05:37 crc kubenswrapper[4793]: I0127 20:05:37.882100 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" Jan 27 20:05:38 crc kubenswrapper[4793]: I0127 20:05:38.162534 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vlz9q" Jan 27 20:05:38 crc kubenswrapper[4793]: I0127 20:05:38.191793 4793 generic.go:334] "Generic (PLEG): container finished" podID="2b8d1e1f-e58d-4080-acdb-0e762f74e141" containerID="78ef46a49ef5c021826a15cb00a358137dbe289f9cca20d3d6bad44dd5bda047" exitCode=0 Jan 27 20:05:38 crc kubenswrapper[4793]: I0127 20:05:38.191851 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"2b8d1e1f-e58d-4080-acdb-0e762f74e141","Type":"ContainerDied","Data":"78ef46a49ef5c021826a15cb00a358137dbe289f9cca20d3d6bad44dd5bda047"} Jan 27 20:05:38 crc kubenswrapper[4793]: I0127 20:05:38.208339 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d7pdv" Jan 27 20:05:38 crc kubenswrapper[4793]: I0127 20:05:38.427172 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:05:38 crc kubenswrapper[4793]: I0127 20:05:38.709152 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 27 20:05:38 crc kubenswrapper[4793]: I0127 20:05:38.811104 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2b8d1e1f-e58d-4080-acdb-0e762f74e141-kube-api-access\") pod \"2b8d1e1f-e58d-4080-acdb-0e762f74e141\" (UID: \"2b8d1e1f-e58d-4080-acdb-0e762f74e141\") " Jan 27 20:05:38 crc kubenswrapper[4793]: I0127 20:05:38.811197 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2b8d1e1f-e58d-4080-acdb-0e762f74e141-kubelet-dir\") pod \"2b8d1e1f-e58d-4080-acdb-0e762f74e141\" (UID: \"2b8d1e1f-e58d-4080-acdb-0e762f74e141\") " Jan 27 20:05:38 crc kubenswrapper[4793]: I0127 20:05:38.811492 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2b8d1e1f-e58d-4080-acdb-0e762f74e141-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "2b8d1e1f-e58d-4080-acdb-0e762f74e141" (UID: "2b8d1e1f-e58d-4080-acdb-0e762f74e141"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:05:38 crc kubenswrapper[4793]: I0127 20:05:38.915031 4793 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2b8d1e1f-e58d-4080-acdb-0e762f74e141-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 27 20:05:38 crc kubenswrapper[4793]: I0127 20:05:38.952127 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b8d1e1f-e58d-4080-acdb-0e762f74e141-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "2b8d1e1f-e58d-4080-acdb-0e762f74e141" (UID: "2b8d1e1f-e58d-4080-acdb-0e762f74e141"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:05:39 crc kubenswrapper[4793]: I0127 20:05:39.019997 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2b8d1e1f-e58d-4080-acdb-0e762f74e141-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 27 20:05:39 crc kubenswrapper[4793]: I0127 20:05:39.362821 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"2b8d1e1f-e58d-4080-acdb-0e762f74e141","Type":"ContainerDied","Data":"6abac4862794fd63b5807d820303cf2db141c8a47c01c7c4548218b896a3732f"} Jan 27 20:05:39 crc kubenswrapper[4793]: I0127 20:05:39.362894 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6abac4862794fd63b5807d820303cf2db141c8a47c01c7c4548218b896a3732f" Jan 27 20:05:39 crc kubenswrapper[4793]: I0127 20:05:39.363026 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 27 20:05:39 crc kubenswrapper[4793]: I0127 20:05:39.532311 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs\") pod \"network-metrics-daemon-gsrf9\" (UID: \"93412db5-52e2-4b3a-aee4-3c43f090750e\") " pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:05:39 crc kubenswrapper[4793]: I0127 20:05:39.539285 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/93412db5-52e2-4b3a-aee4-3c43f090750e-metrics-certs\") pod \"network-metrics-daemon-gsrf9\" (UID: \"93412db5-52e2-4b3a-aee4-3c43f090750e\") " pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:05:39 crc kubenswrapper[4793]: I0127 20:05:39.817024 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-gsrf9" Jan 27 20:05:40 crc kubenswrapper[4793]: I0127 20:05:40.872540 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-gsrf9"] Jan 27 20:05:40 crc kubenswrapper[4793]: W0127 20:05:40.909455 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod93412db5_52e2_4b3a_aee4_3c43f090750e.slice/crio-e4bfac7479331c8ea3c3f08730148d08455d2cd41ab44db10ceb99f2c1c1c887 WatchSource:0}: Error finding container e4bfac7479331c8ea3c3f08730148d08455d2cd41ab44db10ceb99f2c1c1c887: Status 404 returned error can't find the container with id e4bfac7479331c8ea3c3f08730148d08455d2cd41ab44db10ceb99f2c1c1c887 Jan 27 20:05:41 crc kubenswrapper[4793]: I0127 20:05:41.515059 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" event={"ID":"93412db5-52e2-4b3a-aee4-3c43f090750e","Type":"ContainerStarted","Data":"e4bfac7479331c8ea3c3f08730148d08455d2cd41ab44db10ceb99f2c1c1c887"} Jan 27 20:05:42 crc kubenswrapper[4793]: I0127 20:05:42.604261 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" event={"ID":"93412db5-52e2-4b3a-aee4-3c43f090750e","Type":"ContainerStarted","Data":"b8816517f137b6ad1ed489b1fcb55776bea47681cebfc8c0cc1a4245ec734374"} Jan 27 20:05:43 crc kubenswrapper[4793]: I0127 20:05:43.631004 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-gsrf9" event={"ID":"93412db5-52e2-4b3a-aee4-3c43f090750e","Type":"ContainerStarted","Data":"075bb0e411d7b21929edc841ec8345204e8bc3cf54c896bea94576deec4c378f"} Jan 27 20:05:43 crc kubenswrapper[4793]: I0127 20:05:43.653391 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-gsrf9" podStartSLOduration=146.653370681 podStartE2EDuration="2m26.653370681s" podCreationTimestamp="2026-01-27 20:03:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:05:43.651561508 +0000 UTC m=+169.041814694" watchObservedRunningTime="2026-01-27 20:05:43.653370681 +0000 UTC m=+169.043623837" Jan 27 20:05:47 crc kubenswrapper[4793]: I0127 20:05:47.644655 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:47 crc kubenswrapper[4793]: I0127 20:05:47.649700 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:05:47 crc kubenswrapper[4793]: I0127 20:05:47.832401 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:05:47 crc kubenswrapper[4793]: I0127 20:05:47.832484 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 20:05:47 crc kubenswrapper[4793]: I0127 20:05:47.832567 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr 
container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:05:47 crc kubenswrapper[4793]: I0127 20:05:47.832622 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 20:05:47 crc kubenswrapper[4793]: I0127 20:05:47.873424 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-mq8nr" Jan 27 20:05:47 crc kubenswrapper[4793]: I0127 20:05:47.874099 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:05:47 crc kubenswrapper[4793]: I0127 20:05:47.874070 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"216e537348723b7afd6c1593c5a6c4937e4ca4f4d4ddd41015ae1f48872be21c"} pod="openshift-console/downloads-7954f5f757-mq8nr" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 27 20:05:47 crc kubenswrapper[4793]: I0127 20:05:47.874153 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 20:05:47 crc kubenswrapper[4793]: I0127 20:05:47.874160 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" containerID="cri-o://216e537348723b7afd6c1593c5a6c4937e4ca4f4d4ddd41015ae1f48872be21c" gracePeriod=2 Jan 27 20:05:48 crc kubenswrapper[4793]: I0127 20:05:48.731637 4793 generic.go:334] "Generic (PLEG): container finished" podID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerID="216e537348723b7afd6c1593c5a6c4937e4ca4f4d4ddd41015ae1f48872be21c" exitCode=0 Jan 27 20:05:48 crc kubenswrapper[4793]: I0127 20:05:48.731678 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-mq8nr" event={"ID":"a043f31e-8e0a-41eb-a2ad-73f6d5795b0a","Type":"ContainerDied","Data":"216e537348723b7afd6c1593c5a6c4937e4ca4f4d4ddd41015ae1f48872be21c"} Jan 27 20:05:49 crc kubenswrapper[4793]: I0127 20:05:49.438338 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zw89c"] Jan 27 20:05:49 crc kubenswrapper[4793]: I0127 20:05:49.438964 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" podUID="470ff764-d3e6-48a6-aa1b-b4777a1d746f" containerName="controller-manager" containerID="cri-o://7db64802125589ae67628bf36b2a4093c519ff5fce18774bf9f93e889f120849" gracePeriod=30 Jan 27 20:05:49 crc kubenswrapper[4793]: I0127 20:05:49.441014 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5"] Jan 27 20:05:49 crc kubenswrapper[4793]: I0127 20:05:49.441233 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" podUID="aa065492-723b-40dd-9259-1a4452804068" containerName="route-controller-manager" containerID="cri-o://dc3dde5dcd9427093a4dc20ba3845e9ba056034aa9ca5426676af71b1680e65e" gracePeriod=30 Jan 27 20:05:49 crc kubenswrapper[4793]: I0127 20:05:49.742496 4793 generic.go:334] "Generic (PLEG): container finished" podID="aa065492-723b-40dd-9259-1a4452804068" containerID="dc3dde5dcd9427093a4dc20ba3845e9ba056034aa9ca5426676af71b1680e65e" exitCode=0 Jan 27 20:05:49 crc kubenswrapper[4793]: I0127 20:05:49.742563 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" event={"ID":"aa065492-723b-40dd-9259-1a4452804068","Type":"ContainerDied","Data":"dc3dde5dcd9427093a4dc20ba3845e9ba056034aa9ca5426676af71b1680e65e"} Jan 27 20:05:50 crc kubenswrapper[4793]: I0127 20:05:50.754190 4793 generic.go:334] "Generic (PLEG): container finished" podID="470ff764-d3e6-48a6-aa1b-b4777a1d746f" containerID="7db64802125589ae67628bf36b2a4093c519ff5fce18774bf9f93e889f120849" exitCode=0 Jan 27 20:05:50 crc kubenswrapper[4793]: I0127 20:05:50.754238 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" event={"ID":"470ff764-d3e6-48a6-aa1b-b4777a1d746f","Type":"ContainerDied","Data":"7db64802125589ae67628bf36b2a4093c519ff5fce18774bf9f93e889f120849"} Jan 27 20:05:51 crc kubenswrapper[4793]: I0127 20:05:51.156439 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:05:52 crc kubenswrapper[4793]: I0127 20:05:52.754303 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:05:52 crc kubenswrapper[4793]: I0127 20:05:52.754379 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:05:57 crc kubenswrapper[4793]: I0127 20:05:57.512427 4793 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-48bz5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Jan 27 20:05:57 crc kubenswrapper[4793]: I0127 20:05:57.513126 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" podUID="aa065492-723b-40dd-9259-1a4452804068" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Jan 27 20:05:57 crc kubenswrapper[4793]: I0127 20:05:57.834313 4793 patch_prober.go:28] interesting 
pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:05:57 crc kubenswrapper[4793]: I0127 20:05:57.834413 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 20:05:57 crc kubenswrapper[4793]: I0127 20:05:57.892401 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fc5bx" Jan 27 20:05:58 crc kubenswrapper[4793]: I0127 20:05:58.422052 4793 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-zw89c container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" start-of-body= Jan 27 20:05:58 crc kubenswrapper[4793]: I0127 20:05:58.422125 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" podUID="470ff764-d3e6-48a6-aa1b-b4777a1d746f" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" Jan 27 20:06:04 crc kubenswrapper[4793]: I0127 20:06:04.842783 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 20:06:07 crc kubenswrapper[4793]: I0127 20:06:07.845860 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:06:07 crc kubenswrapper[4793]: I0127 20:06:07.846431 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.259524 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.265326 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.290291 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5f75859d97-v5k84"] Jan 27 20:06:08 crc kubenswrapper[4793]: E0127 20:06:08.290539 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="470ff764-d3e6-48a6-aa1b-b4777a1d746f" containerName="controller-manager" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.290567 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="470ff764-d3e6-48a6-aa1b-b4777a1d746f" containerName="controller-manager" Jan 27 20:06:08 crc kubenswrapper[4793]: E0127 20:06:08.290581 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11588ab1-689a-4227-a887-a57b945807a2" containerName="collect-profiles" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.290587 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="11588ab1-689a-4227-a887-a57b945807a2" containerName="collect-profiles" Jan 27 20:06:08 crc kubenswrapper[4793]: E0127 20:06:08.290597 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa065492-723b-40dd-9259-1a4452804068" containerName="route-controller-manager" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.290605 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa065492-723b-40dd-9259-1a4452804068" containerName="route-controller-manager" Jan 27 20:06:08 crc kubenswrapper[4793]: E0127 20:06:08.290617 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b8d1e1f-e58d-4080-acdb-0e762f74e141" containerName="pruner" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.290623 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b8d1e1f-e58d-4080-acdb-0e762f74e141" containerName="pruner" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.290717 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b8d1e1f-e58d-4080-acdb-0e762f74e141" containerName="pruner" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.290728 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa065492-723b-40dd-9259-1a4452804068" containerName="route-controller-manager" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.290736 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="470ff764-d3e6-48a6-aa1b-b4777a1d746f" containerName="controller-manager" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.290746 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="11588ab1-689a-4227-a887-a57b945807a2" containerName="collect-profiles" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.291527 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.298515 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5f75859d97-v5k84"] Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.354118 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-config\") pod \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.354210 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa065492-723b-40dd-9259-1a4452804068-config\") pod \"aa065492-723b-40dd-9259-1a4452804068\" (UID: \"aa065492-723b-40dd-9259-1a4452804068\") " Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.354262 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrptt\" (UniqueName: \"kubernetes.io/projected/aa065492-723b-40dd-9259-1a4452804068-kube-api-access-lrptt\") pod \"aa065492-723b-40dd-9259-1a4452804068\" (UID: \"aa065492-723b-40dd-9259-1a4452804068\") " Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.354349 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-client-ca\") pod \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.354372 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa065492-723b-40dd-9259-1a4452804068-serving-cert\") pod \"aa065492-723b-40dd-9259-1a4452804068\" (UID: \"aa065492-723b-40dd-9259-1a4452804068\") " Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.354412 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/470ff764-d3e6-48a6-aa1b-b4777a1d746f-serving-cert\") pod \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.354433 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-proxy-ca-bundles\") pod \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.354499 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kc2rq\" (UniqueName: \"kubernetes.io/projected/470ff764-d3e6-48a6-aa1b-b4777a1d746f-kube-api-access-kc2rq\") pod \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\" (UID: \"470ff764-d3e6-48a6-aa1b-b4777a1d746f\") " Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.354637 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa065492-723b-40dd-9259-1a4452804068-client-ca\") pod \"aa065492-723b-40dd-9259-1a4452804068\" (UID: \"aa065492-723b-40dd-9259-1a4452804068\") " Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.354847 4793 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-config\") pod \"controller-manager-5f75859d97-v5k84\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.354890 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91a44913-63ac-484f-8341-d021657036c3-serving-cert\") pod \"controller-manager-5f75859d97-v5k84\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.354949 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-client-ca\") pod \"controller-manager-5f75859d97-v5k84\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.354967 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnj2r\" (UniqueName: \"kubernetes.io/projected/91a44913-63ac-484f-8341-d021657036c3-kube-api-access-xnj2r\") pod \"controller-manager-5f75859d97-v5k84\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.354996 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-proxy-ca-bundles\") pod \"controller-manager-5f75859d97-v5k84\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.356450 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-config" (OuterVolumeSpecName: "config") pod "470ff764-d3e6-48a6-aa1b-b4777a1d746f" (UID: "470ff764-d3e6-48a6-aa1b-b4777a1d746f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.357093 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa065492-723b-40dd-9259-1a4452804068-config" (OuterVolumeSpecName: "config") pod "aa065492-723b-40dd-9259-1a4452804068" (UID: "aa065492-723b-40dd-9259-1a4452804068"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.373447 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-client-ca" (OuterVolumeSpecName: "client-ca") pod "470ff764-d3e6-48a6-aa1b-b4777a1d746f" (UID: "470ff764-d3e6-48a6-aa1b-b4777a1d746f"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.374465 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "470ff764-d3e6-48a6-aa1b-b4777a1d746f" (UID: "470ff764-d3e6-48a6-aa1b-b4777a1d746f"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.375361 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa065492-723b-40dd-9259-1a4452804068-client-ca" (OuterVolumeSpecName: "client-ca") pod "aa065492-723b-40dd-9259-1a4452804068" (UID: "aa065492-723b-40dd-9259-1a4452804068"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.376682 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa065492-723b-40dd-9259-1a4452804068-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "aa065492-723b-40dd-9259-1a4452804068" (UID: "aa065492-723b-40dd-9259-1a4452804068"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.376995 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/470ff764-d3e6-48a6-aa1b-b4777a1d746f-kube-api-access-kc2rq" (OuterVolumeSpecName: "kube-api-access-kc2rq") pod "470ff764-d3e6-48a6-aa1b-b4777a1d746f" (UID: "470ff764-d3e6-48a6-aa1b-b4777a1d746f"). InnerVolumeSpecName "kube-api-access-kc2rq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.387099 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa065492-723b-40dd-9259-1a4452804068-kube-api-access-lrptt" (OuterVolumeSpecName: "kube-api-access-lrptt") pod "aa065492-723b-40dd-9259-1a4452804068" (UID: "aa065492-723b-40dd-9259-1a4452804068"). InnerVolumeSpecName "kube-api-access-lrptt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.388295 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/470ff764-d3e6-48a6-aa1b-b4777a1d746f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "470ff764-d3e6-48a6-aa1b-b4777a1d746f" (UID: "470ff764-d3e6-48a6-aa1b-b4777a1d746f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.455814 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-client-ca\") pod \"controller-manager-5f75859d97-v5k84\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.456164 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnj2r\" (UniqueName: \"kubernetes.io/projected/91a44913-63ac-484f-8341-d021657036c3-kube-api-access-xnj2r\") pod \"controller-manager-5f75859d97-v5k84\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.456197 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-proxy-ca-bundles\") pod \"controller-manager-5f75859d97-v5k84\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.456262 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-config\") pod \"controller-manager-5f75859d97-v5k84\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.456293 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91a44913-63ac-484f-8341-d021657036c3-serving-cert\") pod \"controller-manager-5f75859d97-v5k84\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.456383 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa065492-723b-40dd-9259-1a4452804068-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.456394 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrptt\" (UniqueName: \"kubernetes.io/projected/aa065492-723b-40dd-9259-1a4452804068-kube-api-access-lrptt\") on node \"crc\" DevicePath \"\"" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.456404 4793 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.456413 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa065492-723b-40dd-9259-1a4452804068-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.456421 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/470ff764-d3e6-48a6-aa1b-b4777a1d746f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.456430 4793 
reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.456438 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kc2rq\" (UniqueName: \"kubernetes.io/projected/470ff764-d3e6-48a6-aa1b-b4777a1d746f-kube-api-access-kc2rq\") on node \"crc\" DevicePath \"\"" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.456446 4793 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa065492-723b-40dd-9259-1a4452804068-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.456454 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/470ff764-d3e6-48a6-aa1b-b4777a1d746f-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.457050 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-client-ca\") pod \"controller-manager-5f75859d97-v5k84\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.457841 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-proxy-ca-bundles\") pod \"controller-manager-5f75859d97-v5k84\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.458286 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-config\") pod \"controller-manager-5f75859d97-v5k84\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.460650 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91a44913-63ac-484f-8341-d021657036c3-serving-cert\") pod \"controller-manager-5f75859d97-v5k84\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.474001 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnj2r\" (UniqueName: \"kubernetes.io/projected/91a44913-63ac-484f-8341-d021657036c3-kube-api-access-xnj2r\") pod \"controller-manager-5f75859d97-v5k84\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.513046 4793 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-48bz5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.513121 4793 
prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" podUID="aa065492-723b-40dd-9259-1a4452804068" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 20:06:08 crc kubenswrapper[4793]: I0127 20:06:08.649624 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.079467 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.079480 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5" event={"ID":"aa065492-723b-40dd-9259-1a4452804068","Type":"ContainerDied","Data":"f6c6682eafebfffd5f8bab70a7ee5d339586a794da80b16aa91bd439d3655510"} Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.079580 4793 scope.go:117] "RemoveContainer" containerID="dc3dde5dcd9427093a4dc20ba3845e9ba056034aa9ca5426676af71b1680e65e" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.081656 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" event={"ID":"470ff764-d3e6-48a6-aa1b-b4777a1d746f","Type":"ContainerDied","Data":"babb3039a55f0b4b8cefac07cc1e41129acd54134ec857fa7e818373cc2d3290"} Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.081695 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zw89c" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.114995 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zw89c"] Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.118732 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zw89c"] Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.131230 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5"] Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.168090 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-48bz5"] Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.305564 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5f75859d97-v5k84"] Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.446628 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4"] Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.447195 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4"] Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.447264 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.450716 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.450904 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.451168 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.451308 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.451428 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.451616 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.522336 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6df82742-4fac-4d73-8b8b-7d191320bd59-serving-cert\") pod \"route-controller-manager-6c9b4fd8c6-pmcp4\" (UID: \"6df82742-4fac-4d73-8b8b-7d191320bd59\") " pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.522476 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psngf\" (UniqueName: \"kubernetes.io/projected/6df82742-4fac-4d73-8b8b-7d191320bd59-kube-api-access-psngf\") pod \"route-controller-manager-6c9b4fd8c6-pmcp4\" (UID: \"6df82742-4fac-4d73-8b8b-7d191320bd59\") " pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.522577 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6df82742-4fac-4d73-8b8b-7d191320bd59-config\") pod \"route-controller-manager-6c9b4fd8c6-pmcp4\" (UID: \"6df82742-4fac-4d73-8b8b-7d191320bd59\") " pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.522664 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6df82742-4fac-4d73-8b8b-7d191320bd59-client-ca\") pod \"route-controller-manager-6c9b4fd8c6-pmcp4\" (UID: \"6df82742-4fac-4d73-8b8b-7d191320bd59\") " pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.623597 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6df82742-4fac-4d73-8b8b-7d191320bd59-serving-cert\") pod \"route-controller-manager-6c9b4fd8c6-pmcp4\" (UID: \"6df82742-4fac-4d73-8b8b-7d191320bd59\") " pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 
20:06:09.623675 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psngf\" (UniqueName: \"kubernetes.io/projected/6df82742-4fac-4d73-8b8b-7d191320bd59-kube-api-access-psngf\") pod \"route-controller-manager-6c9b4fd8c6-pmcp4\" (UID: \"6df82742-4fac-4d73-8b8b-7d191320bd59\") " pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.623727 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6df82742-4fac-4d73-8b8b-7d191320bd59-config\") pod \"route-controller-manager-6c9b4fd8c6-pmcp4\" (UID: \"6df82742-4fac-4d73-8b8b-7d191320bd59\") " pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.623786 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6df82742-4fac-4d73-8b8b-7d191320bd59-client-ca\") pod \"route-controller-manager-6c9b4fd8c6-pmcp4\" (UID: \"6df82742-4fac-4d73-8b8b-7d191320bd59\") " pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.624927 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6df82742-4fac-4d73-8b8b-7d191320bd59-client-ca\") pod \"route-controller-manager-6c9b4fd8c6-pmcp4\" (UID: \"6df82742-4fac-4d73-8b8b-7d191320bd59\") " pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.625902 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6df82742-4fac-4d73-8b8b-7d191320bd59-config\") pod \"route-controller-manager-6c9b4fd8c6-pmcp4\" (UID: \"6df82742-4fac-4d73-8b8b-7d191320bd59\") " pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.750338 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psngf\" (UniqueName: \"kubernetes.io/projected/6df82742-4fac-4d73-8b8b-7d191320bd59-kube-api-access-psngf\") pod \"route-controller-manager-6c9b4fd8c6-pmcp4\" (UID: \"6df82742-4fac-4d73-8b8b-7d191320bd59\") " pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.751850 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6df82742-4fac-4d73-8b8b-7d191320bd59-serving-cert\") pod \"route-controller-manager-6c9b4fd8c6-pmcp4\" (UID: \"6df82742-4fac-4d73-8b8b-7d191320bd59\") " pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.773823 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.813520 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="470ff764-d3e6-48a6-aa1b-b4777a1d746f" path="/var/lib/kubelet/pods/470ff764-d3e6-48a6-aa1b-b4777a1d746f/volumes" Jan 27 20:06:09 crc kubenswrapper[4793]: I0127 20:06:09.814133 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa065492-723b-40dd-9259-1a4452804068" path="/var/lib/kubelet/pods/aa065492-723b-40dd-9259-1a4452804068/volumes" Jan 27 20:06:12 crc kubenswrapper[4793]: E0127 20:06:12.301111 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 27 20:06:12 crc kubenswrapper[4793]: E0127 20:06:12.302014 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hb6cz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-tcslq_openshift-marketplace(5493a5b8-666b-4e96-8912-e8ddc28327fe): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 27 20:06:12 crc kubenswrapper[4793]: E0127 20:06:12.303587 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-tcslq" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" Jan 27 20:06:12 crc kubenswrapper[4793]: E0127 20:06:12.322074 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 27 20:06:12 crc 
kubenswrapper[4793]: E0127 20:06:12.322213 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l8mtz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-ch6xq_openshift-marketplace(1809bb2d-a0ed-4679-ab68-27db1963e044): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 27 20:06:12 crc kubenswrapper[4793]: E0127 20:06:12.323392 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-ch6xq" podUID="1809bb2d-a0ed-4679-ab68-27db1963e044" Jan 27 20:06:13 crc kubenswrapper[4793]: I0127 20:06:13.022833 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 27 20:06:13 crc kubenswrapper[4793]: I0127 20:06:13.024213 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 20:06:13 crc kubenswrapper[4793]: I0127 20:06:13.031640 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 27 20:06:13 crc kubenswrapper[4793]: I0127 20:06:13.032191 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 27 20:06:13 crc kubenswrapper[4793]: I0127 20:06:13.032735 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 27 20:06:13 crc kubenswrapper[4793]: I0127 20:06:13.075848 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a8cf8395-bdf2-47d8-983d-6559fc2d994f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a8cf8395-bdf2-47d8-983d-6559fc2d994f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 20:06:13 crc kubenswrapper[4793]: I0127 20:06:13.075902 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a8cf8395-bdf2-47d8-983d-6559fc2d994f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a8cf8395-bdf2-47d8-983d-6559fc2d994f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 20:06:13 crc kubenswrapper[4793]: I0127 20:06:13.177195 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a8cf8395-bdf2-47d8-983d-6559fc2d994f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a8cf8395-bdf2-47d8-983d-6559fc2d994f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 20:06:13 crc kubenswrapper[4793]: I0127 20:06:13.177734 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a8cf8395-bdf2-47d8-983d-6559fc2d994f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a8cf8395-bdf2-47d8-983d-6559fc2d994f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 20:06:13 crc kubenswrapper[4793]: I0127 20:06:13.177847 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a8cf8395-bdf2-47d8-983d-6559fc2d994f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a8cf8395-bdf2-47d8-983d-6559fc2d994f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 20:06:13 crc kubenswrapper[4793]: I0127 20:06:13.196336 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a8cf8395-bdf2-47d8-983d-6559fc2d994f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a8cf8395-bdf2-47d8-983d-6559fc2d994f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 20:06:13 crc kubenswrapper[4793]: I0127 20:06:13.348210 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 20:06:15 crc kubenswrapper[4793]: E0127 20:06:15.447924 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-tcslq" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" Jan 27 20:06:15 crc kubenswrapper[4793]: E0127 20:06:15.447968 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-ch6xq" podUID="1809bb2d-a0ed-4679-ab68-27db1963e044" Jan 27 20:06:15 crc kubenswrapper[4793]: E0127 20:06:15.538167 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 27 20:06:15 crc kubenswrapper[4793]: E0127 20:06:15.538379 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x52cs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-j7v8p_openshift-marketplace(ef02211f-9add-4072-aa2d-4df47b879c0d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 27 20:06:15 crc kubenswrapper[4793]: E0127 20:06:15.540742 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-j7v8p" podUID="ef02211f-9add-4072-aa2d-4df47b879c0d" Jan 27 20:06:17 crc kubenswrapper[4793]: E0127 20:06:17.123293 4793 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-j7v8p" podUID="ef02211f-9add-4072-aa2d-4df47b879c0d" Jan 27 20:06:17 crc kubenswrapper[4793]: E0127 20:06:17.211113 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 27 20:06:17 crc kubenswrapper[4793]: E0127 20:06:17.211570 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n9btv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-jh9bl_openshift-marketplace(18834635-b900-480e-844b-4c075b169d4a): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 27 20:06:17 crc kubenswrapper[4793]: E0127 20:06:17.212866 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-jh9bl" podUID="18834635-b900-480e-844b-4c075b169d4a" Jan 27 20:06:17 crc kubenswrapper[4793]: I0127 20:06:17.833504 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:06:17 crc kubenswrapper[4793]: I0127 20:06:17.833590 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" 
output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 20:06:18 crc kubenswrapper[4793]: I0127 20:06:18.217646 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 27 20:06:18 crc kubenswrapper[4793]: I0127 20:06:18.219576 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 27 20:06:18 crc kubenswrapper[4793]: I0127 20:06:18.226962 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 27 20:06:18 crc kubenswrapper[4793]: I0127 20:06:18.245534 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-kube-api-access\") pod \"installer-9-crc\" (UID: \"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 27 20:06:18 crc kubenswrapper[4793]: I0127 20:06:18.245588 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-kubelet-dir\") pod \"installer-9-crc\" (UID: \"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 27 20:06:18 crc kubenswrapper[4793]: I0127 20:06:18.245646 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-var-lock\") pod \"installer-9-crc\" (UID: \"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 27 20:06:18 crc kubenswrapper[4793]: I0127 20:06:18.346793 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-var-lock\") pod \"installer-9-crc\" (UID: \"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 27 20:06:18 crc kubenswrapper[4793]: I0127 20:06:18.346883 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-kube-api-access\") pod \"installer-9-crc\" (UID: \"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 27 20:06:18 crc kubenswrapper[4793]: I0127 20:06:18.346930 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-kubelet-dir\") pod \"installer-9-crc\" (UID: \"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 27 20:06:18 crc kubenswrapper[4793]: I0127 20:06:18.346934 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-var-lock\") pod \"installer-9-crc\" (UID: \"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 27 20:06:18 crc kubenswrapper[4793]: I0127 20:06:18.347028 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-kubelet-dir\") pod \"installer-9-crc\" (UID: 
\"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 27 20:06:18 crc kubenswrapper[4793]: I0127 20:06:18.372976 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-kube-api-access\") pod \"installer-9-crc\" (UID: \"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 27 20:06:18 crc kubenswrapper[4793]: I0127 20:06:18.544691 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 27 20:06:18 crc kubenswrapper[4793]: E0127 20:06:18.793719 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-jh9bl" podUID="18834635-b900-480e-844b-4c075b169d4a" Jan 27 20:06:18 crc kubenswrapper[4793]: I0127 20:06:18.837447 4793 scope.go:117] "RemoveContainer" containerID="7db64802125589ae67628bf36b2a4093c519ff5fce18774bf9f93e889f120849" Jan 27 20:06:18 crc kubenswrapper[4793]: E0127 20:06:18.870909 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 27 20:06:18 crc kubenswrapper[4793]: E0127 20:06:18.871091 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sk97f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-qmlkz_openshift-marketplace(345c96d4-a84a-4d09-9d94-f68e4c3bff9b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 27 20:06:18 crc kubenswrapper[4793]: E0127 20:06:18.872272 4793 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-qmlkz" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" Jan 27 20:06:18 crc kubenswrapper[4793]: E0127 20:06:18.920274 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 27 20:06:18 crc kubenswrapper[4793]: E0127 20:06:18.920620 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rgjqm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-n2ntn_openshift-marketplace(0bcb229d-7351-4a1f-9a61-8c54a7ee039c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 27 20:06:18 crc kubenswrapper[4793]: E0127 20:06:18.922409 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-n2ntn" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" Jan 27 20:06:18 crc kubenswrapper[4793]: E0127 20:06:18.957186 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 27 20:06:18 crc kubenswrapper[4793]: E0127 20:06:18.957315 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs 
--catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f8qvq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-lcdjd_openshift-marketplace(50ff3901-4109-4f4e-9933-20bccf83d99d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 27 20:06:18 crc kubenswrapper[4793]: E0127 20:06:18.958670 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-lcdjd" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" Jan 27 20:06:18 crc kubenswrapper[4793]: E0127 20:06:18.967611 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 27 20:06:18 crc kubenswrapper[4793]: E0127 20:06:18.967731 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vx95k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-2lmqb_openshift-marketplace(f7506fff-3cb5-42dd-80c3-203b1354c70d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 27 20:06:18 crc kubenswrapper[4793]: E0127 20:06:18.968978 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-2lmqb" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" Jan 27 20:06:19 crc kubenswrapper[4793]: I0127 20:06:19.159210 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-mq8nr" event={"ID":"a043f31e-8e0a-41eb-a2ad-73f6d5795b0a","Type":"ContainerStarted","Data":"108ed3cd989b6bc9f45fa2e08f8917f72594f64b2c8eba93a54ba25115beb5bb"} Jan 27 20:06:19 crc kubenswrapper[4793]: I0127 20:06:19.159529 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-mq8nr" Jan 27 20:06:19 crc kubenswrapper[4793]: I0127 20:06:19.159919 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:06:19 crc kubenswrapper[4793]: I0127 20:06:19.159960 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 20:06:19 crc kubenswrapper[4793]: E0127 20:06:19.168419 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-qmlkz" 
podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" Jan 27 20:06:19 crc kubenswrapper[4793]: E0127 20:06:19.169363 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-2lmqb" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" Jan 27 20:06:19 crc kubenswrapper[4793]: E0127 20:06:19.169375 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-n2ntn" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" Jan 27 20:06:19 crc kubenswrapper[4793]: E0127 20:06:19.169501 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-lcdjd" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" Jan 27 20:06:19 crc kubenswrapper[4793]: I0127 20:06:19.302739 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4"] Jan 27 20:06:19 crc kubenswrapper[4793]: W0127 20:06:19.310677 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6df82742_4fac_4d73_8b8b_7d191320bd59.slice/crio-0f86e8f725dedc4b632e1e13e6605f0e9a825596a12f53c44cba56a858fd0dd5 WatchSource:0}: Error finding container 0f86e8f725dedc4b632e1e13e6605f0e9a825596a12f53c44cba56a858fd0dd5: Status 404 returned error can't find the container with id 0f86e8f725dedc4b632e1e13e6605f0e9a825596a12f53c44cba56a858fd0dd5 Jan 27 20:06:19 crc kubenswrapper[4793]: I0127 20:06:19.424518 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 27 20:06:19 crc kubenswrapper[4793]: I0127 20:06:19.445902 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5f75859d97-v5k84"] Jan 27 20:06:19 crc kubenswrapper[4793]: I0127 20:06:19.460964 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 27 20:06:19 crc kubenswrapper[4793]: W0127 20:06:19.484024 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-poda8cf8395_bdf2_47d8_983d_6559fc2d994f.slice/crio-230af1d502c35460b3022de86146df0c3b912e384a9a920c5a6477b73e40b4fe WatchSource:0}: Error finding container 230af1d502c35460b3022de86146df0c3b912e384a9a920c5a6477b73e40b4fe: Status 404 returned error can't find the container with id 230af1d502c35460b3022de86146df0c3b912e384a9a920c5a6477b73e40b4fe Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.695275 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0","Type":"ContainerStarted","Data":"40c80c871df4be0b6009974de1f4e57324262b23d9497bb8381a5b474319f686"} Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.696369 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" 
event={"ID":"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0","Type":"ContainerStarted","Data":"27608e43d13c0bbe31b92c9bc1b28942a174002388b7e4b4d7063d9ac440fc06"} Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.697384 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a8cf8395-bdf2-47d8-983d-6559fc2d994f","Type":"ContainerStarted","Data":"2da32b334997b77ca111f88640bd48cfc82dbc77f2216abec2619535fbe503bc"} Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.697437 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a8cf8395-bdf2-47d8-983d-6559fc2d994f","Type":"ContainerStarted","Data":"230af1d502c35460b3022de86146df0c3b912e384a9a920c5a6477b73e40b4fe"} Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.701227 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" event={"ID":"6df82742-4fac-4d73-8b8b-7d191320bd59","Type":"ContainerStarted","Data":"800d46f9308fed6cdcbaf40c2946e069a04997ba73eac5edf5b7801fae012c43"} Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.701259 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" event={"ID":"6df82742-4fac-4d73-8b8b-7d191320bd59","Type":"ContainerStarted","Data":"0f86e8f725dedc4b632e1e13e6605f0e9a825596a12f53c44cba56a858fd0dd5"} Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.701917 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.703114 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" event={"ID":"91a44913-63ac-484f-8341-d021657036c3","Type":"ContainerStarted","Data":"336a9c2ae0f1ee7cda1b406f1ff5c770f8ed81f6734c2a7f557100d62b6a1f56"} Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.703146 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" event={"ID":"91a44913-63ac-484f-8341-d021657036c3","Type":"ContainerStarted","Data":"3cc22c6155c6ecc5b5ff20468eeb4d67f0de1609233fc70ec8be9fc91fe300e4"} Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.703266 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" podUID="91a44913-63ac-484f-8341-d021657036c3" containerName="controller-manager" containerID="cri-o://336a9c2ae0f1ee7cda1b406f1ff5c770f8ed81f6734c2a7f557100d62b6a1f56" gracePeriod=30 Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.703611 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.703618 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.704727 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" 
containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.710659 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.713515 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.738607 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=7.738593821 podStartE2EDuration="7.738593821s" podCreationTimestamp="2026-01-27 20:06:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:06:20.737110281 +0000 UTC m=+206.127363437" watchObservedRunningTime="2026-01-27 20:06:20.738593821 +0000 UTC m=+206.128846977" Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.740118 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.740113021 podStartE2EDuration="2.740113021s" podCreationTimestamp="2026-01-27 20:06:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:06:20.723390717 +0000 UTC m=+206.113643873" watchObservedRunningTime="2026-01-27 20:06:20.740113021 +0000 UTC m=+206.130366177" Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.759937 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" podStartSLOduration=31.759919767 podStartE2EDuration="31.759919767s" podCreationTimestamp="2026-01-27 20:05:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:06:20.757230233 +0000 UTC m=+206.147483389" watchObservedRunningTime="2026-01-27 20:06:20.759919767 +0000 UTC m=+206.150172923" Jan 27 20:06:20 crc kubenswrapper[4793]: I0127 20:06:20.776146 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" podStartSLOduration=11.776126741 podStartE2EDuration="11.776126741s" podCreationTimestamp="2026-01-27 20:06:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:06:20.771446117 +0000 UTC m=+206.161699273" watchObservedRunningTime="2026-01-27 20:06:20.776126741 +0000 UTC m=+206.166379897" Jan 27 20:06:21 crc kubenswrapper[4793]: I0127 20:06:21.708539 4793 generic.go:334] "Generic (PLEG): container finished" podID="a8cf8395-bdf2-47d8-983d-6559fc2d994f" containerID="2da32b334997b77ca111f88640bd48cfc82dbc77f2216abec2619535fbe503bc" exitCode=0 Jan 27 20:06:21 crc kubenswrapper[4793]: I0127 20:06:21.708696 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a8cf8395-bdf2-47d8-983d-6559fc2d994f","Type":"ContainerDied","Data":"2da32b334997b77ca111f88640bd48cfc82dbc77f2216abec2619535fbe503bc"} Jan 27 20:06:21 crc kubenswrapper[4793]: I0127 
20:06:21.710400 4793 generic.go:334] "Generic (PLEG): container finished" podID="91a44913-63ac-484f-8341-d021657036c3" containerID="336a9c2ae0f1ee7cda1b406f1ff5c770f8ed81f6734c2a7f557100d62b6a1f56" exitCode=0 Jan 27 20:06:21 crc kubenswrapper[4793]: I0127 20:06:21.710429 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" event={"ID":"91a44913-63ac-484f-8341-d021657036c3","Type":"ContainerDied","Data":"336a9c2ae0f1ee7cda1b406f1ff5c770f8ed81f6734c2a7f557100d62b6a1f56"} Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.049688 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.076281 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5b4bf97844-5lbjb"] Jan 27 20:06:22 crc kubenswrapper[4793]: E0127 20:06:22.076490 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91a44913-63ac-484f-8341-d021657036c3" containerName="controller-manager" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.076501 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="91a44913-63ac-484f-8341-d021657036c3" containerName="controller-manager" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.076780 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="91a44913-63ac-484f-8341-d021657036c3" containerName="controller-manager" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.077118 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.082430 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5b4bf97844-5lbjb"] Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.116118 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-client-ca\") pod \"91a44913-63ac-484f-8341-d021657036c3\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.116155 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91a44913-63ac-484f-8341-d021657036c3-serving-cert\") pod \"91a44913-63ac-484f-8341-d021657036c3\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.116238 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xnj2r\" (UniqueName: \"kubernetes.io/projected/91a44913-63ac-484f-8341-d021657036c3-kube-api-access-xnj2r\") pod \"91a44913-63ac-484f-8341-d021657036c3\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.116258 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-proxy-ca-bundles\") pod \"91a44913-63ac-484f-8341-d021657036c3\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.116288 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-config\") pod \"91a44913-63ac-484f-8341-d021657036c3\" (UID: \"91a44913-63ac-484f-8341-d021657036c3\") " Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.116427 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-client-ca\") pod \"controller-manager-5b4bf97844-5lbjb\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") " pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.116445 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7fea649c-e852-49f6-803b-9dced7eed97e-serving-cert\") pod \"controller-manager-5b4bf97844-5lbjb\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") " pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.116488 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-config\") pod \"controller-manager-5b4bf97844-5lbjb\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") " pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.116522 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sm5pz\" (UniqueName: \"kubernetes.io/projected/7fea649c-e852-49f6-803b-9dced7eed97e-kube-api-access-sm5pz\") pod \"controller-manager-5b4bf97844-5lbjb\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") " pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.116574 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-proxy-ca-bundles\") pod \"controller-manager-5b4bf97844-5lbjb\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") " pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.117060 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-client-ca" (OuterVolumeSpecName: "client-ca") pod "91a44913-63ac-484f-8341-d021657036c3" (UID: "91a44913-63ac-484f-8341-d021657036c3"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.117232 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-config" (OuterVolumeSpecName: "config") pod "91a44913-63ac-484f-8341-d021657036c3" (UID: "91a44913-63ac-484f-8341-d021657036c3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.117474 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "91a44913-63ac-484f-8341-d021657036c3" (UID: "91a44913-63ac-484f-8341-d021657036c3"). 
InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.123786 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91a44913-63ac-484f-8341-d021657036c3-kube-api-access-xnj2r" (OuterVolumeSpecName: "kube-api-access-xnj2r") pod "91a44913-63ac-484f-8341-d021657036c3" (UID: "91a44913-63ac-484f-8341-d021657036c3"). InnerVolumeSpecName "kube-api-access-xnj2r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.124797 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91a44913-63ac-484f-8341-d021657036c3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "91a44913-63ac-484f-8341-d021657036c3" (UID: "91a44913-63ac-484f-8341-d021657036c3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.218453 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-proxy-ca-bundles\") pod \"controller-manager-5b4bf97844-5lbjb\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") " pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.218570 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-client-ca\") pod \"controller-manager-5b4bf97844-5lbjb\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") " pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.218598 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7fea649c-e852-49f6-803b-9dced7eed97e-serving-cert\") pod \"controller-manager-5b4bf97844-5lbjb\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") " pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.218645 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-config\") pod \"controller-manager-5b4bf97844-5lbjb\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") " pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.218683 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sm5pz\" (UniqueName: \"kubernetes.io/projected/7fea649c-e852-49f6-803b-9dced7eed97e-kube-api-access-sm5pz\") pod \"controller-manager-5b4bf97844-5lbjb\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") " pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.218718 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91a44913-63ac-484f-8341-d021657036c3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.218728 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xnj2r\" (UniqueName: 
\"kubernetes.io/projected/91a44913-63ac-484f-8341-d021657036c3-kube-api-access-xnj2r\") on node \"crc\" DevicePath \"\"" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.218738 4793 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.218746 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.218754 4793 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/91a44913-63ac-484f-8341-d021657036c3-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.219734 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-client-ca\") pod \"controller-manager-5b4bf97844-5lbjb\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") " pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.220212 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-proxy-ca-bundles\") pod \"controller-manager-5b4bf97844-5lbjb\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") " pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.221113 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-config\") pod \"controller-manager-5b4bf97844-5lbjb\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") " pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.223932 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7fea649c-e852-49f6-803b-9dced7eed97e-serving-cert\") pod \"controller-manager-5b4bf97844-5lbjb\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") " pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.234627 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sm5pz\" (UniqueName: \"kubernetes.io/projected/7fea649c-e852-49f6-803b-9dced7eed97e-kube-api-access-sm5pz\") pod \"controller-manager-5b4bf97844-5lbjb\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") " pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.398429 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.637356 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5b4bf97844-5lbjb"] Jan 27 20:06:22 crc kubenswrapper[4793]: W0127 20:06:22.646368 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7fea649c_e852_49f6_803b_9dced7eed97e.slice/crio-cb43c9614714fb4df1ceea0ffb74d712c504e7a56db635b2211614a034f501ed WatchSource:0}: Error finding container cb43c9614714fb4df1ceea0ffb74d712c504e7a56db635b2211614a034f501ed: Status 404 returned error can't find the container with id cb43c9614714fb4df1ceea0ffb74d712c504e7a56db635b2211614a034f501ed Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.718221 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" event={"ID":"7fea649c-e852-49f6-803b-9dced7eed97e","Type":"ContainerStarted","Data":"cb43c9614714fb4df1ceea0ffb74d712c504e7a56db635b2211614a034f501ed"} Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.720718 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" event={"ID":"91a44913-63ac-484f-8341-d021657036c3","Type":"ContainerDied","Data":"3cc22c6155c6ecc5b5ff20468eeb4d67f0de1609233fc70ec8be9fc91fe300e4"} Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.720755 4793 scope.go:117] "RemoveContainer" containerID="336a9c2ae0f1ee7cda1b406f1ff5c770f8ed81f6734c2a7f557100d62b6a1f56" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.720860 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5f75859d97-v5k84" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.751748 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5f75859d97-v5k84"] Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.753271 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.753320 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.753367 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.754116 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.754186 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b" gracePeriod=600 Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.754218 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5f75859d97-v5k84"] Jan 27 20:06:22 crc kubenswrapper[4793]: I0127 20:06:22.926118 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 20:06:23 crc kubenswrapper[4793]: I0127 20:06:23.030154 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a8cf8395-bdf2-47d8-983d-6559fc2d994f-kubelet-dir\") pod \"a8cf8395-bdf2-47d8-983d-6559fc2d994f\" (UID: \"a8cf8395-bdf2-47d8-983d-6559fc2d994f\") " Jan 27 20:06:23 crc kubenswrapper[4793]: I0127 20:06:23.030392 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a8cf8395-bdf2-47d8-983d-6559fc2d994f-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a8cf8395-bdf2-47d8-983d-6559fc2d994f" (UID: "a8cf8395-bdf2-47d8-983d-6559fc2d994f"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:06:23 crc kubenswrapper[4793]: I0127 20:06:23.030649 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a8cf8395-bdf2-47d8-983d-6559fc2d994f-kube-api-access\") pod \"a8cf8395-bdf2-47d8-983d-6559fc2d994f\" (UID: \"a8cf8395-bdf2-47d8-983d-6559fc2d994f\") " Jan 27 20:06:23 crc kubenswrapper[4793]: I0127 20:06:23.030891 4793 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a8cf8395-bdf2-47d8-983d-6559fc2d994f-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 27 20:06:23 crc kubenswrapper[4793]: I0127 20:06:23.042060 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8cf8395-bdf2-47d8-983d-6559fc2d994f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a8cf8395-bdf2-47d8-983d-6559fc2d994f" (UID: "a8cf8395-bdf2-47d8-983d-6559fc2d994f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:06:23 crc kubenswrapper[4793]: I0127 20:06:23.131446 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a8cf8395-bdf2-47d8-983d-6559fc2d994f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 27 20:06:24 crc kubenswrapper[4793]: I0127 20:06:24.067276 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91a44913-63ac-484f-8341-d021657036c3" path="/var/lib/kubelet/pods/91a44913-63ac-484f-8341-d021657036c3/volumes" Jan 27 20:06:24 crc kubenswrapper[4793]: I0127 20:06:24.083417 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b" exitCode=0 Jan 27 20:06:24 crc kubenswrapper[4793]: I0127 20:06:24.083606 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b"} Jan 27 20:06:24 crc kubenswrapper[4793]: I0127 20:06:24.083647 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"5ad0190dc6c6ef5802a60f92533256b89a77bdfc77d69410a9c08fd1d53d4085"} Jan 27 20:06:24 crc kubenswrapper[4793]: I0127 20:06:24.090664 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 20:06:24 crc kubenswrapper[4793]: I0127 20:06:24.090750 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a8cf8395-bdf2-47d8-983d-6559fc2d994f","Type":"ContainerDied","Data":"230af1d502c35460b3022de86146df0c3b912e384a9a920c5a6477b73e40b4fe"} Jan 27 20:06:24 crc kubenswrapper[4793]: I0127 20:06:24.090786 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="230af1d502c35460b3022de86146df0c3b912e384a9a920c5a6477b73e40b4fe" Jan 27 20:06:24 crc kubenswrapper[4793]: I0127 20:06:24.096213 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" event={"ID":"7fea649c-e852-49f6-803b-9dced7eed97e","Type":"ContainerStarted","Data":"a8c1f2c41212011509c1e110364e603697d04223729c11772238ea23389866d9"} Jan 27 20:06:24 crc kubenswrapper[4793]: I0127 20:06:24.097406 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:24 crc kubenswrapper[4793]: I0127 20:06:24.113213 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" Jan 27 20:06:24 crc kubenswrapper[4793]: I0127 20:06:24.141706 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" podStartSLOduration=15.141689299 podStartE2EDuration="15.141689299s" podCreationTimestamp="2026-01-27 20:06:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:06:24.13625778 +0000 UTC m=+209.526510936" watchObservedRunningTime="2026-01-27 20:06:24.141689299 +0000 UTC m=+209.531942455" Jan 27 20:06:27 crc kubenswrapper[4793]: I0127 20:06:27.399735 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-jtmmw"] Jan 27 20:06:27 crc kubenswrapper[4793]: I0127 20:06:27.884916 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:06:27 crc kubenswrapper[4793]: I0127 20:06:27.884948 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:06:27 crc kubenswrapper[4793]: I0127 20:06:27.885174 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 20:06:27 crc kubenswrapper[4793]: I0127 20:06:27.885365 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 
20:06:29 crc kubenswrapper[4793]: I0127 20:06:29.282751 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j7v8p" event={"ID":"ef02211f-9add-4072-aa2d-4df47b879c0d","Type":"ContainerStarted","Data":"423ed445f92cf65c218b6d1b3e342ab5f05f9fb9c1d68833c73af6490a1bb6df"} Jan 27 20:06:32 crc kubenswrapper[4793]: I0127 20:06:32.368951 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ch6xq" event={"ID":"1809bb2d-a0ed-4679-ab68-27db1963e044","Type":"ContainerStarted","Data":"bcba1d330400e9060101c4f09e803c9c9debc8ac36d035c9287026cc40d8974c"} Jan 27 20:06:34 crc kubenswrapper[4793]: I0127 20:06:34.386879 4793 generic.go:334] "Generic (PLEG): container finished" podID="1809bb2d-a0ed-4679-ab68-27db1963e044" containerID="bcba1d330400e9060101c4f09e803c9c9debc8ac36d035c9287026cc40d8974c" exitCode=0 Jan 27 20:06:34 crc kubenswrapper[4793]: I0127 20:06:34.387199 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ch6xq" event={"ID":"1809bb2d-a0ed-4679-ab68-27db1963e044","Type":"ContainerDied","Data":"bcba1d330400e9060101c4f09e803c9c9debc8ac36d035c9287026cc40d8974c"} Jan 27 20:06:36 crc kubenswrapper[4793]: I0127 20:06:36.406517 4793 generic.go:334] "Generic (PLEG): container finished" podID="ef02211f-9add-4072-aa2d-4df47b879c0d" containerID="423ed445f92cf65c218b6d1b3e342ab5f05f9fb9c1d68833c73af6490a1bb6df" exitCode=0 Jan 27 20:06:36 crc kubenswrapper[4793]: I0127 20:06:36.406577 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j7v8p" event={"ID":"ef02211f-9add-4072-aa2d-4df47b879c0d","Type":"ContainerDied","Data":"423ed445f92cf65c218b6d1b3e342ab5f05f9fb9c1d68833c73af6490a1bb6df"} Jan 27 20:06:37 crc kubenswrapper[4793]: I0127 20:06:37.832806 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:06:37 crc kubenswrapper[4793]: I0127 20:06:37.832851 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 27 20:06:37 crc kubenswrapper[4793]: I0127 20:06:37.833444 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 20:06:37 crc kubenswrapper[4793]: I0127 20:06:37.833590 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 27 20:06:44 crc kubenswrapper[4793]: I0127 20:06:44.605722 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j7v8p" event={"ID":"ef02211f-9add-4072-aa2d-4df47b879c0d","Type":"ContainerStarted","Data":"6d88a635cb013c5b99481cd6f34b491c61da13b41ed0d75cb1be2740101857ff"} Jan 27 20:06:44 crc kubenswrapper[4793]: I0127 
20:06:44.608435 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jh9bl" event={"ID":"18834635-b900-480e-844b-4c075b169d4a","Type":"ContainerStarted","Data":"753b12be0f4ee3494a1942bb75d332c644b0f024e9eecf4533057be89b601240"} Jan 27 20:06:44 crc kubenswrapper[4793]: I0127 20:06:44.610985 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n2ntn" event={"ID":"0bcb229d-7351-4a1f-9a61-8c54a7ee039c","Type":"ContainerStarted","Data":"df2647518f409ee73582987083cdf0650745013fd8c7ca509578caaf2187ae59"} Jan 27 20:06:44 crc kubenswrapper[4793]: I0127 20:06:44.612899 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmlkz" event={"ID":"345c96d4-a84a-4d09-9d94-f68e4c3bff9b","Type":"ContainerStarted","Data":"aeefc2f50d62af58885d28a0eea62a181bd37cb8dd7259ed11851536ef92e34a"} Jan 27 20:06:44 crc kubenswrapper[4793]: I0127 20:06:44.615103 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2lmqb" event={"ID":"f7506fff-3cb5-42dd-80c3-203b1354c70d","Type":"ContainerStarted","Data":"ba3fcb727ed2dc071079d6fc21c91852c4bc5deff9b3c030c67a3eae4f652ebc"} Jan 27 20:06:44 crc kubenswrapper[4793]: I0127 20:06:44.617650 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tcslq" event={"ID":"5493a5b8-666b-4e96-8912-e8ddc28327fe","Type":"ContainerStarted","Data":"0141f24bdb4c844649fb8c03b003a6e5065a11bde6a2176ca5ae6be81beea550"} Jan 27 20:06:44 crc kubenswrapper[4793]: I0127 20:06:44.621086 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lcdjd" event={"ID":"50ff3901-4109-4f4e-9933-20bccf83d99d","Type":"ContainerStarted","Data":"d2d2ba29b2fac29092f29c989f0f821d9590dad38119b2ef709c331188da3e73"} Jan 27 20:06:44 crc kubenswrapper[4793]: I0127 20:06:44.623775 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ch6xq" event={"ID":"1809bb2d-a0ed-4679-ab68-27db1963e044","Type":"ContainerStarted","Data":"a0072acaba4b1d1b5532f589f240025722174930575956e7949c9f08f147b340"} Jan 27 20:06:44 crc kubenswrapper[4793]: I0127 20:06:44.703139 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-j7v8p" Jan 27 20:06:44 crc kubenswrapper[4793]: I0127 20:06:44.745346 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-j7v8p" Jan 27 20:06:44 crc kubenswrapper[4793]: I0127 20:06:44.778787 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ch6xq" podStartSLOduration=4.858578331 podStartE2EDuration="1m11.778762026s" podCreationTimestamp="2026-01-27 20:05:33 +0000 UTC" firstStartedPulling="2026-01-27 20:05:35.977844715 +0000 UTC m=+161.368097871" lastFinishedPulling="2026-01-27 20:06:42.89802841 +0000 UTC m=+228.288281566" observedRunningTime="2026-01-27 20:06:44.776193575 +0000 UTC m=+230.166446751" watchObservedRunningTime="2026-01-27 20:06:44.778762026 +0000 UTC m=+230.169015182" Jan 27 20:06:44 crc kubenswrapper[4793]: I0127 20:06:44.778938 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-j7v8p" podStartSLOduration=5.4265629 podStartE2EDuration="1m11.7789328s" podCreationTimestamp="2026-01-27 20:05:33 +0000 UTC" 
firstStartedPulling="2026-01-27 20:05:37.139763317 +0000 UTC m=+162.530016473" lastFinishedPulling="2026-01-27 20:06:43.492133217 +0000 UTC m=+228.882386373" observedRunningTime="2026-01-27 20:06:44.742517652 +0000 UTC m=+230.132770808" watchObservedRunningTime="2026-01-27 20:06:44.7789328 +0000 UTC m=+230.169185966" Jan 27 20:06:46 crc kubenswrapper[4793]: I0127 20:06:46.488964 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-j7v8p" podUID="ef02211f-9add-4072-aa2d-4df47b879c0d" containerName="registry-server" probeResult="failure" output=< Jan 27 20:06:46 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s Jan 27 20:06:46 crc kubenswrapper[4793]: > Jan 27 20:06:47 crc kubenswrapper[4793]: I0127 20:06:47.690101 4793 generic.go:334] "Generic (PLEG): container finished" podID="18834635-b900-480e-844b-4c075b169d4a" containerID="753b12be0f4ee3494a1942bb75d332c644b0f024e9eecf4533057be89b601240" exitCode=0 Jan 27 20:06:47 crc kubenswrapper[4793]: I0127 20:06:47.690175 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jh9bl" event={"ID":"18834635-b900-480e-844b-4c075b169d4a","Type":"ContainerDied","Data":"753b12be0f4ee3494a1942bb75d332c644b0f024e9eecf4533057be89b601240"} Jan 27 20:06:47 crc kubenswrapper[4793]: I0127 20:06:47.693540 4793 generic.go:334] "Generic (PLEG): container finished" podID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" containerID="df2647518f409ee73582987083cdf0650745013fd8c7ca509578caaf2187ae59" exitCode=0 Jan 27 20:06:47 crc kubenswrapper[4793]: I0127 20:06:47.693612 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n2ntn" event={"ID":"0bcb229d-7351-4a1f-9a61-8c54a7ee039c","Type":"ContainerDied","Data":"df2647518f409ee73582987083cdf0650745013fd8c7ca509578caaf2187ae59"} Jan 27 20:06:47 crc kubenswrapper[4793]: I0127 20:06:47.698695 4793 generic.go:334] "Generic (PLEG): container finished" podID="5493a5b8-666b-4e96-8912-e8ddc28327fe" containerID="0141f24bdb4c844649fb8c03b003a6e5065a11bde6a2176ca5ae6be81beea550" exitCode=0 Jan 27 20:06:47 crc kubenswrapper[4793]: I0127 20:06:47.698770 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tcslq" event={"ID":"5493a5b8-666b-4e96-8912-e8ddc28327fe","Type":"ContainerDied","Data":"0141f24bdb4c844649fb8c03b003a6e5065a11bde6a2176ca5ae6be81beea550"} Jan 27 20:06:47 crc kubenswrapper[4793]: I0127 20:06:47.702785 4793 generic.go:334] "Generic (PLEG): container finished" podID="50ff3901-4109-4f4e-9933-20bccf83d99d" containerID="d2d2ba29b2fac29092f29c989f0f821d9590dad38119b2ef709c331188da3e73" exitCode=0 Jan 27 20:06:47 crc kubenswrapper[4793]: I0127 20:06:47.702815 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lcdjd" event={"ID":"50ff3901-4109-4f4e-9933-20bccf83d99d","Type":"ContainerDied","Data":"d2d2ba29b2fac29092f29c989f0f821d9590dad38119b2ef709c331188da3e73"} Jan 27 20:06:47 crc kubenswrapper[4793]: I0127 20:06:47.925230 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-mq8nr" Jan 27 20:06:48 crc kubenswrapper[4793]: I0127 20:06:48.713759 4793 generic.go:334] "Generic (PLEG): container finished" podID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" containerID="aeefc2f50d62af58885d28a0eea62a181bd37cb8dd7259ed11851536ef92e34a" exitCode=0 Jan 27 20:06:48 crc kubenswrapper[4793]: I0127 20:06:48.713833 4793 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmlkz" event={"ID":"345c96d4-a84a-4d09-9d94-f68e4c3bff9b","Type":"ContainerDied","Data":"aeefc2f50d62af58885d28a0eea62a181bd37cb8dd7259ed11851536ef92e34a"}
Jan 27 20:06:49 crc kubenswrapper[4793]: I0127 20:06:49.324532 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5b4bf97844-5lbjb"]
Jan 27 20:06:49 crc kubenswrapper[4793]: I0127 20:06:49.324776 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" podUID="7fea649c-e852-49f6-803b-9dced7eed97e" containerName="controller-manager" containerID="cri-o://a8c1f2c41212011509c1e110364e603697d04223729c11772238ea23389866d9" gracePeriod=30
Jan 27 20:06:49 crc kubenswrapper[4793]: I0127 20:06:49.668894 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4"]
Jan 27 20:06:49 crc kubenswrapper[4793]: I0127 20:06:49.669450 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" podUID="6df82742-4fac-4d73-8b8b-7d191320bd59" containerName="route-controller-manager" containerID="cri-o://800d46f9308fed6cdcbaf40c2946e069a04997ba73eac5edf5b7801fae012c43" gracePeriod=30
Jan 27 20:06:49 crc kubenswrapper[4793]: I0127 20:06:49.775797 4793 patch_prober.go:28] interesting pod/route-controller-manager-6c9b4fd8c6-pmcp4 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.55:8443/healthz\": dial tcp 10.217.0.55:8443: connect: connection refused" start-of-body=
Jan 27 20:06:49 crc kubenswrapper[4793]: I0127 20:06:49.775858 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" podUID="6df82742-4fac-4d73-8b8b-7d191320bd59" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.55:8443/healthz\": dial tcp 10.217.0.55:8443: connect: connection refused"
Jan 27 20:06:50 crc kubenswrapper[4793]: I0127 20:06:50.725310 4793 generic.go:334] "Generic (PLEG): container finished" podID="6df82742-4fac-4d73-8b8b-7d191320bd59" containerID="800d46f9308fed6cdcbaf40c2946e069a04997ba73eac5edf5b7801fae012c43" exitCode=0
Jan 27 20:06:50 crc kubenswrapper[4793]: I0127 20:06:50.725389 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" event={"ID":"6df82742-4fac-4d73-8b8b-7d191320bd59","Type":"ContainerDied","Data":"800d46f9308fed6cdcbaf40c2946e069a04997ba73eac5edf5b7801fae012c43"}
Jan 27 20:06:50 crc kubenswrapper[4793]: I0127 20:06:50.727029 4793 generic.go:334] "Generic (PLEG): container finished" podID="7fea649c-e852-49f6-803b-9dced7eed97e" containerID="a8c1f2c41212011509c1e110364e603697d04223729c11772238ea23389866d9" exitCode=0
Jan 27 20:06:50 crc kubenswrapper[4793]: I0127 20:06:50.727087 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" event={"ID":"7fea649c-e852-49f6-803b-9dced7eed97e","Type":"ContainerDied","Data":"a8c1f2c41212011509c1e110364e603697d04223729c11772238ea23389866d9"}
Jan 27 20:06:50 crc kubenswrapper[4793]: I0127 20:06:50.728931 4793 generic.go:334] "Generic (PLEG): container finished" podID="f7506fff-3cb5-42dd-80c3-203b1354c70d" containerID="ba3fcb727ed2dc071079d6fc21c91852c4bc5deff9b3c030c67a3eae4f652ebc" exitCode=0
Jan 27 20:06:50 crc kubenswrapper[4793]: I0127 20:06:50.728952 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2lmqb" event={"ID":"f7506fff-3cb5-42dd-80c3-203b1354c70d","Type":"ContainerDied","Data":"ba3fcb727ed2dc071079d6fc21c91852c4bc5deff9b3c030c67a3eae4f652ebc"}
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.492630 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.528480 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"]
Jan 27 20:06:51 crc kubenswrapper[4793]: E0127 20:06:51.531134 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6df82742-4fac-4d73-8b8b-7d191320bd59" containerName="route-controller-manager"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.531173 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6df82742-4fac-4d73-8b8b-7d191320bd59" containerName="route-controller-manager"
Jan 27 20:06:51 crc kubenswrapper[4793]: E0127 20:06:51.531213 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8cf8395-bdf2-47d8-983d-6559fc2d994f" containerName="pruner"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.531221 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8cf8395-bdf2-47d8-983d-6559fc2d994f" containerName="pruner"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.531517 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="6df82742-4fac-4d73-8b8b-7d191320bd59" containerName="route-controller-manager"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.531563 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8cf8395-bdf2-47d8-983d-6559fc2d994f" containerName="pruner"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.532140 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.547106 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"]
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.654670 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6df82742-4fac-4d73-8b8b-7d191320bd59-serving-cert\") pod \"6df82742-4fac-4d73-8b8b-7d191320bd59\" (UID: \"6df82742-4fac-4d73-8b8b-7d191320bd59\") "
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.654769 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6df82742-4fac-4d73-8b8b-7d191320bd59-client-ca\") pod \"6df82742-4fac-4d73-8b8b-7d191320bd59\" (UID: \"6df82742-4fac-4d73-8b8b-7d191320bd59\") "
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.654906 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6df82742-4fac-4d73-8b8b-7d191320bd59-config\") pod \"6df82742-4fac-4d73-8b8b-7d191320bd59\" (UID: \"6df82742-4fac-4d73-8b8b-7d191320bd59\") "
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.654963 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psngf\" (UniqueName: \"kubernetes.io/projected/6df82742-4fac-4d73-8b8b-7d191320bd59-kube-api-access-psngf\") pod \"6df82742-4fac-4d73-8b8b-7d191320bd59\" (UID: \"6df82742-4fac-4d73-8b8b-7d191320bd59\") "
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.655288 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f14a7294-c538-4d39-abf5-b286d7f08659-config\") pod \"route-controller-manager-6b4d8cc5bd-c7h8r\" (UID: \"f14a7294-c538-4d39-abf5-b286d7f08659\") " pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.655377 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zl88l\" (UniqueName: \"kubernetes.io/projected/f14a7294-c538-4d39-abf5-b286d7f08659-kube-api-access-zl88l\") pod \"route-controller-manager-6b4d8cc5bd-c7h8r\" (UID: \"f14a7294-c538-4d39-abf5-b286d7f08659\") " pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.655415 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f14a7294-c538-4d39-abf5-b286d7f08659-client-ca\") pod \"route-controller-manager-6b4d8cc5bd-c7h8r\" (UID: \"f14a7294-c538-4d39-abf5-b286d7f08659\") " pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.655450 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f14a7294-c538-4d39-abf5-b286d7f08659-serving-cert\") pod \"route-controller-manager-6b4d8cc5bd-c7h8r\" (UID: \"f14a7294-c538-4d39-abf5-b286d7f08659\") " pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.655808 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6df82742-4fac-4d73-8b8b-7d191320bd59-config" (OuterVolumeSpecName: "config") pod "6df82742-4fac-4d73-8b8b-7d191320bd59" (UID: "6df82742-4fac-4d73-8b8b-7d191320bd59"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.656255 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6df82742-4fac-4d73-8b8b-7d191320bd59-client-ca" (OuterVolumeSpecName: "client-ca") pod "6df82742-4fac-4d73-8b8b-7d191320bd59" (UID: "6df82742-4fac-4d73-8b8b-7d191320bd59"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.659686 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6df82742-4fac-4d73-8b8b-7d191320bd59-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6df82742-4fac-4d73-8b8b-7d191320bd59" (UID: "6df82742-4fac-4d73-8b8b-7d191320bd59"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.659896 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6df82742-4fac-4d73-8b8b-7d191320bd59-kube-api-access-psngf" (OuterVolumeSpecName: "kube-api-access-psngf") pod "6df82742-4fac-4d73-8b8b-7d191320bd59" (UID: "6df82742-4fac-4d73-8b8b-7d191320bd59"). InnerVolumeSpecName "kube-api-access-psngf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.736830 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4" event={"ID":"6df82742-4fac-4d73-8b8b-7d191320bd59","Type":"ContainerDied","Data":"0f86e8f725dedc4b632e1e13e6605f0e9a825596a12f53c44cba56a858fd0dd5"}
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.736890 4793 scope.go:117] "RemoveContainer" containerID="800d46f9308fed6cdcbaf40c2946e069a04997ba73eac5edf5b7801fae012c43"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.736886 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.756492 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zl88l\" (UniqueName: \"kubernetes.io/projected/f14a7294-c538-4d39-abf5-b286d7f08659-kube-api-access-zl88l\") pod \"route-controller-manager-6b4d8cc5bd-c7h8r\" (UID: \"f14a7294-c538-4d39-abf5-b286d7f08659\") " pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.756568 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f14a7294-c538-4d39-abf5-b286d7f08659-client-ca\") pod \"route-controller-manager-6b4d8cc5bd-c7h8r\" (UID: \"f14a7294-c538-4d39-abf5-b286d7f08659\") " pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.756609 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f14a7294-c538-4d39-abf5-b286d7f08659-serving-cert\") pod \"route-controller-manager-6b4d8cc5bd-c7h8r\" (UID: \"f14a7294-c538-4d39-abf5-b286d7f08659\") " pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.756656 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f14a7294-c538-4d39-abf5-b286d7f08659-config\") pod \"route-controller-manager-6b4d8cc5bd-c7h8r\" (UID: \"f14a7294-c538-4d39-abf5-b286d7f08659\") " pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.756711 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6df82742-4fac-4d73-8b8b-7d191320bd59-config\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.756727 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psngf\" (UniqueName: \"kubernetes.io/projected/6df82742-4fac-4d73-8b8b-7d191320bd59-kube-api-access-psngf\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.756740 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6df82742-4fac-4d73-8b8b-7d191320bd59-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.756752 4793 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6df82742-4fac-4d73-8b8b-7d191320bd59-client-ca\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.757904 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f14a7294-c538-4d39-abf5-b286d7f08659-client-ca\") pod \"route-controller-manager-6b4d8cc5bd-c7h8r\" (UID: \"f14a7294-c538-4d39-abf5-b286d7f08659\") " pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.758043 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f14a7294-c538-4d39-abf5-b286d7f08659-config\") pod \"route-controller-manager-6b4d8cc5bd-c7h8r\" (UID: \"f14a7294-c538-4d39-abf5-b286d7f08659\") " pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.760472 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f14a7294-c538-4d39-abf5-b286d7f08659-serving-cert\") pod \"route-controller-manager-6b4d8cc5bd-c7h8r\" (UID: \"f14a7294-c538-4d39-abf5-b286d7f08659\") " pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.770487 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4"]
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.774312 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c9b4fd8c6-pmcp4"]
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.777585 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zl88l\" (UniqueName: \"kubernetes.io/projected/f14a7294-c538-4d39-abf5-b286d7f08659-kube-api-access-zl88l\") pod \"route-controller-manager-6b4d8cc5bd-c7h8r\" (UID: \"f14a7294-c538-4d39-abf5-b286d7f08659\") " pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.813782 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6df82742-4fac-4d73-8b8b-7d191320bd59" path="/var/lib/kubelet/pods/6df82742-4fac-4d73-8b8b-7d191320bd59/volumes"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.865300 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"
Jan 27 20:06:51 crc kubenswrapper[4793]: I0127 20:06:51.942166 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb"
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.060096 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sm5pz\" (UniqueName: \"kubernetes.io/projected/7fea649c-e852-49f6-803b-9dced7eed97e-kube-api-access-sm5pz\") pod \"7fea649c-e852-49f6-803b-9dced7eed97e\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") "
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.060250 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-config\") pod \"7fea649c-e852-49f6-803b-9dced7eed97e\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") "
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.060492 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-proxy-ca-bundles\") pod \"7fea649c-e852-49f6-803b-9dced7eed97e\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") "
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.060559 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7fea649c-e852-49f6-803b-9dced7eed97e-serving-cert\") pod \"7fea649c-e852-49f6-803b-9dced7eed97e\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") "
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.060650 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-client-ca\") pod \"7fea649c-e852-49f6-803b-9dced7eed97e\" (UID: \"7fea649c-e852-49f6-803b-9dced7eed97e\") "
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.061217 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7fea649c-e852-49f6-803b-9dced7eed97e" (UID: "7fea649c-e852-49f6-803b-9dced7eed97e"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.061233 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-client-ca" (OuterVolumeSpecName: "client-ca") pod "7fea649c-e852-49f6-803b-9dced7eed97e" (UID: "7fea649c-e852-49f6-803b-9dced7eed97e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.061403 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-config" (OuterVolumeSpecName: "config") pod "7fea649c-e852-49f6-803b-9dced7eed97e" (UID: "7fea649c-e852-49f6-803b-9dced7eed97e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.062990 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fea649c-e852-49f6-803b-9dced7eed97e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7fea649c-e852-49f6-803b-9dced7eed97e" (UID: "7fea649c-e852-49f6-803b-9dced7eed97e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.063038 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fea649c-e852-49f6-803b-9dced7eed97e-kube-api-access-sm5pz" (OuterVolumeSpecName: "kube-api-access-sm5pz") pod "7fea649c-e852-49f6-803b-9dced7eed97e" (UID: "7fea649c-e852-49f6-803b-9dced7eed97e"). InnerVolumeSpecName "kube-api-access-sm5pz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.162537 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sm5pz\" (UniqueName: \"kubernetes.io/projected/7fea649c-e852-49f6-803b-9dced7eed97e-kube-api-access-sm5pz\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.162589 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-config\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.162599 4793 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.162607 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7fea649c-e852-49f6-803b-9dced7eed97e-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.162617 4793 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7fea649c-e852-49f6-803b-9dced7eed97e-client-ca\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.366317 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"]
Jan 27 20:06:52 crc kubenswrapper[4793]: W0127 20:06:52.373804 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf14a7294_c538_4d39_abf5_b286d7f08659.slice/crio-ee416f8b36cb58bcd200b0079380dff6837b9023dce93cab466a975747492789 WatchSource:0}: Error finding container ee416f8b36cb58bcd200b0079380dff6837b9023dce93cab466a975747492789: Status 404 returned error can't find the container with id ee416f8b36cb58bcd200b0079380dff6837b9023dce93cab466a975747492789
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.655615 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" podUID="41a53f60-6551-47cf-a063-02a42f9983e9" containerName="oauth-openshift" containerID="cri-o://1e311a6c539e7f56e08fd9bd59cbd97141fd741f6c521abe69a84ca2bfb157f6" gracePeriod=15
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.744939 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb" event={"ID":"7fea649c-e852-49f6-803b-9dced7eed97e","Type":"ContainerDied","Data":"cb43c9614714fb4df1ceea0ffb74d712c504e7a56db635b2211614a034f501ed"}
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.744998 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b4bf97844-5lbjb"
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.745039 4793 scope.go:117] "RemoveContainer" containerID="a8c1f2c41212011509c1e110364e603697d04223729c11772238ea23389866d9"
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.747771 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r" event={"ID":"f14a7294-c538-4d39-abf5-b286d7f08659","Type":"ContainerStarted","Data":"ee416f8b36cb58bcd200b0079380dff6837b9023dce93cab466a975747492789"}
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.774517 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5b4bf97844-5lbjb"]
Jan 27 20:06:52 crc kubenswrapper[4793]: I0127 20:06:52.779228 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5b4bf97844-5lbjb"]
Jan 27 20:06:53 crc kubenswrapper[4793]: I0127 20:06:53.466360 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ch6xq"
Jan 27 20:06:53 crc kubenswrapper[4793]: I0127 20:06:53.466728 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ch6xq"
Jan 27 20:06:53 crc kubenswrapper[4793]: I0127 20:06:53.716947 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ch6xq"
Jan 27 20:06:53 crc kubenswrapper[4793]: I0127 20:06:53.757332 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jh9bl" event={"ID":"18834635-b900-480e-844b-4c075b169d4a","Type":"ContainerStarted","Data":"7414776ee4d5151c8f3e70e447bca56d01e87c3c3ab9e2b5a2fa8efa4a704de2"}
Jan 27 20:06:53 crc kubenswrapper[4793]: I0127 20:06:53.760233 4793 generic.go:334] "Generic (PLEG): container finished" podID="41a53f60-6551-47cf-a063-02a42f9983e9" containerID="1e311a6c539e7f56e08fd9bd59cbd97141fd741f6c521abe69a84ca2bfb157f6" exitCode=0
Jan 27 20:06:53 crc kubenswrapper[4793]: I0127 20:06:53.760319 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" event={"ID":"41a53f60-6551-47cf-a063-02a42f9983e9","Type":"ContainerDied","Data":"1e311a6c539e7f56e08fd9bd59cbd97141fd741f6c521abe69a84ca2bfb157f6"}
Jan 27 20:06:53 crc kubenswrapper[4793]: I0127 20:06:53.782748 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jh9bl" podStartSLOduration=4.384265936 podStartE2EDuration="1m22.782722638s" podCreationTimestamp="2026-01-27 20:05:31 +0000 UTC" firstStartedPulling="2026-01-27 20:05:33.731621509 +0000 UTC m=+159.121874665" lastFinishedPulling="2026-01-27 20:06:52.130078211 +0000 UTC m=+237.520331367" observedRunningTime="2026-01-27 20:06:53.778008204 +0000 UTC m=+239.168261350" watchObservedRunningTime="2026-01-27 20:06:53.782722638 +0000 UTC m=+239.172975794"
Jan 27 20:06:53 crc kubenswrapper[4793]: I0127 20:06:53.810639 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fea649c-e852-49f6-803b-9dced7eed97e" path="/var/lib/kubelet/pods/7fea649c-e852-49f6-803b-9dced7eed97e/volumes"
Jan 27 20:06:53 crc kubenswrapper[4793]: I0127 20:06:53.811077 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ch6xq"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.458074 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"]
Jan 27 20:06:54 crc kubenswrapper[4793]: E0127 20:06:54.458281 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fea649c-e852-49f6-803b-9dced7eed97e" containerName="controller-manager"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.458293 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fea649c-e852-49f6-803b-9dced7eed97e" containerName="controller-manager"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.458391 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fea649c-e852-49f6-803b-9dced7eed97e" containerName="controller-manager"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.458806 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.466127 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.466651 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.470255 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.470825 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.470839 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.472204 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.472508 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.476013 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"]
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.595368 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-client-ca\") pod \"controller-manager-6c84bb8bf6-rnx9g\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.595883 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mx9tn\" (UniqueName: \"kubernetes.io/projected/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-kube-api-access-mx9tn\") pod \"controller-manager-6c84bb8bf6-rnx9g\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.596007 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-serving-cert\") pod \"controller-manager-6c84bb8bf6-rnx9g\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.596041 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-proxy-ca-bundles\") pod \"controller-manager-6c84bb8bf6-rnx9g\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.596092 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-config\") pod \"controller-manager-6c84bb8bf6-rnx9g\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.697371 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mx9tn\" (UniqueName: \"kubernetes.io/projected/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-kube-api-access-mx9tn\") pod \"controller-manager-6c84bb8bf6-rnx9g\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.697433 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-serving-cert\") pod \"controller-manager-6c84bb8bf6-rnx9g\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.697451 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-proxy-ca-bundles\") pod \"controller-manager-6c84bb8bf6-rnx9g\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.697469 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-config\") pod \"controller-manager-6c84bb8bf6-rnx9g\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.697564 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-client-ca\") pod \"controller-manager-6c84bb8bf6-rnx9g\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.699023 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-client-ca\") pod \"controller-manager-6c84bb8bf6-rnx9g\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.699230 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-proxy-ca-bundles\") pod \"controller-manager-6c84bb8bf6-rnx9g\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.699528 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-config\") pod \"controller-manager-6c84bb8bf6-rnx9g\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.704339 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-serving-cert\") pod \"controller-manager-6c84bb8bf6-rnx9g\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.755048 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mx9tn\" (UniqueName: \"kubernetes.io/projected/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-kube-api-access-mx9tn\") pod \"controller-manager-6c84bb8bf6-rnx9g\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.756745 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-j7v8p"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.779908 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:54 crc kubenswrapper[4793]: I0127 20:06:54.799728 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-j7v8p"
Jan 27 20:06:55 crc kubenswrapper[4793]: I0127 20:06:55.952963 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ch6xq"]
Jan 27 20:06:55 crc kubenswrapper[4793]: I0127 20:06:55.954007 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ch6xq" podUID="1809bb2d-a0ed-4679-ab68-27db1963e044" containerName="registry-server" containerID="cri-o://a0072acaba4b1d1b5532f589f240025722174930575956e7949c9f08f147b340" gracePeriod=2
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.311453 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw"
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.424056 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-audit-policies\") pod \"41a53f60-6551-47cf-a063-02a42f9983e9\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.424143 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-login\") pod \"41a53f60-6551-47cf-a063-02a42f9983e9\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.424183 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/41a53f60-6551-47cf-a063-02a42f9983e9-audit-dir\") pod \"41a53f60-6551-47cf-a063-02a42f9983e9\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.424203 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-service-ca\") pod \"41a53f60-6551-47cf-a063-02a42f9983e9\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.424221 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-trusted-ca-bundle\") pod \"41a53f60-6551-47cf-a063-02a42f9983e9\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.424251 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41a53f60-6551-47cf-a063-02a42f9983e9-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "41a53f60-6551-47cf-a063-02a42f9983e9" (UID: "41a53f60-6551-47cf-a063-02a42f9983e9"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.424282 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-idp-0-file-data\") pod \"41a53f60-6551-47cf-a063-02a42f9983e9\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.424304 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-ocp-branding-template\") pod \"41a53f60-6551-47cf-a063-02a42f9983e9\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.424351 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-error\") pod \"41a53f60-6551-47cf-a063-02a42f9983e9\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.424383 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sz5fl\" (UniqueName: \"kubernetes.io/projected/41a53f60-6551-47cf-a063-02a42f9983e9-kube-api-access-sz5fl\") pod \"41a53f60-6551-47cf-a063-02a42f9983e9\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.424428 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-serving-cert\") pod \"41a53f60-6551-47cf-a063-02a42f9983e9\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.424455 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-router-certs\") pod \"41a53f60-6551-47cf-a063-02a42f9983e9\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.424514 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-provider-selection\") pod \"41a53f60-6551-47cf-a063-02a42f9983e9\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.424533 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-cliconfig\") pod \"41a53f60-6551-47cf-a063-02a42f9983e9\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.424585 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-session\") pod \"41a53f60-6551-47cf-a063-02a42f9983e9\" (UID: \"41a53f60-6551-47cf-a063-02a42f9983e9\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.424828 4793 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/41a53f60-6551-47cf-a063-02a42f9983e9-audit-dir\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.425008 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "41a53f60-6551-47cf-a063-02a42f9983e9" (UID: "41a53f60-6551-47cf-a063-02a42f9983e9"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.425080 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "41a53f60-6551-47cf-a063-02a42f9983e9" (UID: "41a53f60-6551-47cf-a063-02a42f9983e9"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.425102 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "41a53f60-6551-47cf-a063-02a42f9983e9" (UID: "41a53f60-6551-47cf-a063-02a42f9983e9"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.427576 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "41a53f60-6551-47cf-a063-02a42f9983e9" (UID: "41a53f60-6551-47cf-a063-02a42f9983e9"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.442903 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "41a53f60-6551-47cf-a063-02a42f9983e9" (UID: "41a53f60-6551-47cf-a063-02a42f9983e9"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.443627 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "41a53f60-6551-47cf-a063-02a42f9983e9" (UID: "41a53f60-6551-47cf-a063-02a42f9983e9"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.443908 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41a53f60-6551-47cf-a063-02a42f9983e9-kube-api-access-sz5fl" (OuterVolumeSpecName: "kube-api-access-sz5fl") pod "41a53f60-6551-47cf-a063-02a42f9983e9" (UID: "41a53f60-6551-47cf-a063-02a42f9983e9"). InnerVolumeSpecName "kube-api-access-sz5fl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.443917 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "41a53f60-6551-47cf-a063-02a42f9983e9" (UID: "41a53f60-6551-47cf-a063-02a42f9983e9"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.444116 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "41a53f60-6551-47cf-a063-02a42f9983e9" (UID: "41a53f60-6551-47cf-a063-02a42f9983e9"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.444420 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "41a53f60-6551-47cf-a063-02a42f9983e9" (UID: "41a53f60-6551-47cf-a063-02a42f9983e9"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.444621 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "41a53f60-6551-47cf-a063-02a42f9983e9" (UID: "41a53f60-6551-47cf-a063-02a42f9983e9"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.444856 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "41a53f60-6551-47cf-a063-02a42f9983e9" (UID: "41a53f60-6551-47cf-a063-02a42f9983e9"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.445264 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "41a53f60-6551-47cf-a063-02a42f9983e9" (UID: "41a53f60-6551-47cf-a063-02a42f9983e9"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.548597 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.548875 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.548886 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.548896 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.548907 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.548918 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.548927 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sz5fl\" (UniqueName: \"kubernetes.io/projected/41a53f60-6551-47cf-a063-02a42f9983e9-kube-api-access-sz5fl\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.548938 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.548947 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.548956 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.548972 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.548985 4793 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/41a53f60-6551-47cf-a063-02a42f9983e9-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.548994 4793 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/41a53f60-6551-47cf-a063-02a42f9983e9-audit-policies\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.636784 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ch6xq"
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.750995 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1809bb2d-a0ed-4679-ab68-27db1963e044-catalog-content\") pod \"1809bb2d-a0ed-4679-ab68-27db1963e044\" (UID: \"1809bb2d-a0ed-4679-ab68-27db1963e044\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.751121 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1809bb2d-a0ed-4679-ab68-27db1963e044-utilities\") pod \"1809bb2d-a0ed-4679-ab68-27db1963e044\" (UID: \"1809bb2d-a0ed-4679-ab68-27db1963e044\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.751465 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8mtz\" (UniqueName: \"kubernetes.io/projected/1809bb2d-a0ed-4679-ab68-27db1963e044-kube-api-access-l8mtz\") pod \"1809bb2d-a0ed-4679-ab68-27db1963e044\" (UID: \"1809bb2d-a0ed-4679-ab68-27db1963e044\") "
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.754763 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1809bb2d-a0ed-4679-ab68-27db1963e044-utilities" (OuterVolumeSpecName: "utilities") pod "1809bb2d-a0ed-4679-ab68-27db1963e044" (UID: "1809bb2d-a0ed-4679-ab68-27db1963e044"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.759955 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1809bb2d-a0ed-4679-ab68-27db1963e044-kube-api-access-l8mtz" (OuterVolumeSpecName: "kube-api-access-l8mtz") pod "1809bb2d-a0ed-4679-ab68-27db1963e044" (UID: "1809bb2d-a0ed-4679-ab68-27db1963e044"). InnerVolumeSpecName "kube-api-access-l8mtz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.781411 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1809bb2d-a0ed-4679-ab68-27db1963e044-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1809bb2d-a0ed-4679-ab68-27db1963e044" (UID: "1809bb2d-a0ed-4679-ab68-27db1963e044"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.823526 4793 generic.go:334] "Generic (PLEG): container finished" podID="1809bb2d-a0ed-4679-ab68-27db1963e044" containerID="a0072acaba4b1d1b5532f589f240025722174930575956e7949c9f08f147b340" exitCode=0
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.823750 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ch6xq" event={"ID":"1809bb2d-a0ed-4679-ab68-27db1963e044","Type":"ContainerDied","Data":"a0072acaba4b1d1b5532f589f240025722174930575956e7949c9f08f147b340"}
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.823807 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ch6xq" event={"ID":"1809bb2d-a0ed-4679-ab68-27db1963e044","Type":"ContainerDied","Data":"050dabdd8984fbf1c4262c9fedf51f0f23cd23563e289d070b4d335afc59ff7d"}
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.823848 4793 scope.go:117] "RemoveContainer" containerID="a0072acaba4b1d1b5532f589f240025722174930575956e7949c9f08f147b340"
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.824048 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ch6xq"
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.854526 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw" event={"ID":"41a53f60-6551-47cf-a063-02a42f9983e9","Type":"ContainerDied","Data":"794a5bc3933c6a849d156be56c4fed003d2e841cd031d138ed4b868feb7b3960"}
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.854752 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-jtmmw"
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.857393 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1809bb2d-a0ed-4679-ab68-27db1963e044-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.857600 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1809bb2d-a0ed-4679-ab68-27db1963e044-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.857703 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8mtz\" (UniqueName: \"kubernetes.io/projected/1809bb2d-a0ed-4679-ab68-27db1963e044-kube-api-access-l8mtz\") on node \"crc\" DevicePath \"\""
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.864737 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"]
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.867756 4793 scope.go:117] "RemoveContainer" containerID="bcba1d330400e9060101c4f09e803c9c9debc8ac36d035c9287026cc40d8974c"
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.899536 4793 scope.go:117] "RemoveContainer" containerID="2a7eae65edfe595dd52be60556b19f4cd77ce63d5bc822c657ca449057691a3e"
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.925025 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ch6xq"]
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.928863 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ch6xq"]
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.948998 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-jtmmw"]
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.962451 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-jtmmw"]
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.963042 4793 scope.go:117] "RemoveContainer" containerID="a0072acaba4b1d1b5532f589f240025722174930575956e7949c9f08f147b340"
Jan 27 20:06:56 crc kubenswrapper[4793]: E0127 20:06:56.966355 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0072acaba4b1d1b5532f589f240025722174930575956e7949c9f08f147b340\": container with ID starting with a0072acaba4b1d1b5532f589f240025722174930575956e7949c9f08f147b340 not found: ID does not exist" containerID="a0072acaba4b1d1b5532f589f240025722174930575956e7949c9f08f147b340"
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.966428 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0072acaba4b1d1b5532f589f240025722174930575956e7949c9f08f147b340"} err="failed to get container status \"a0072acaba4b1d1b5532f589f240025722174930575956e7949c9f08f147b340\": rpc error: code = NotFound desc = could not find container \"a0072acaba4b1d1b5532f589f240025722174930575956e7949c9f08f147b340\": container with ID starting with a0072acaba4b1d1b5532f589f240025722174930575956e7949c9f08f147b340 not found: ID does not exist"
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.966487 4793 scope.go:117] "RemoveContainer" containerID="bcba1d330400e9060101c4f09e803c9c9debc8ac36d035c9287026cc40d8974c"
Jan 27 20:06:56 crc kubenswrapper[4793]: E0127 20:06:56.967374 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bcba1d330400e9060101c4f09e803c9c9debc8ac36d035c9287026cc40d8974c\": container with ID starting with bcba1d330400e9060101c4f09e803c9c9debc8ac36d035c9287026cc40d8974c not found: ID does not exist" containerID="bcba1d330400e9060101c4f09e803c9c9debc8ac36d035c9287026cc40d8974c"
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.967419 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcba1d330400e9060101c4f09e803c9c9debc8ac36d035c9287026cc40d8974c"} err="failed to get container status \"bcba1d330400e9060101c4f09e803c9c9debc8ac36d035c9287026cc40d8974c\": rpc error: code = NotFound desc = could not find container \"bcba1d330400e9060101c4f09e803c9c9debc8ac36d035c9287026cc40d8974c\": container with ID starting with bcba1d330400e9060101c4f09e803c9c9debc8ac36d035c9287026cc40d8974c not found: ID does not exist"
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.967466 4793 scope.go:117] "RemoveContainer" containerID="2a7eae65edfe595dd52be60556b19f4cd77ce63d5bc822c657ca449057691a3e"
Jan 27 20:06:56 crc kubenswrapper[4793]: E0127 20:06:56.967970 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a7eae65edfe595dd52be60556b19f4cd77ce63d5bc822c657ca449057691a3e\": container with ID starting with 2a7eae65edfe595dd52be60556b19f4cd77ce63d5bc822c657ca449057691a3e not found: ID does not exist" containerID="2a7eae65edfe595dd52be60556b19f4cd77ce63d5bc822c657ca449057691a3e"
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.968012 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a7eae65edfe595dd52be60556b19f4cd77ce63d5bc822c657ca449057691a3e"} err="failed to get container status \"2a7eae65edfe595dd52be60556b19f4cd77ce63d5bc822c657ca449057691a3e\": rpc error: code = NotFound desc = could not find container \"2a7eae65edfe595dd52be60556b19f4cd77ce63d5bc822c657ca449057691a3e\": container with ID starting with 2a7eae65edfe595dd52be60556b19f4cd77ce63d5bc822c657ca449057691a3e not found: ID does not exist"
Jan 27 20:06:56 crc kubenswrapper[4793]: I0127 20:06:56.968043 4793 scope.go:117] "RemoveContainer" containerID="1e311a6c539e7f56e08fd9bd59cbd97141fd741f6c521abe69a84ca2bfb157f6"
Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.811172 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1809bb2d-a0ed-4679-ab68-27db1963e044" path="/var/lib/kubelet/pods/1809bb2d-a0ed-4679-ab68-27db1963e044/volumes"
Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.811840 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41a53f60-6551-47cf-a063-02a42f9983e9" path="/var/lib/kubelet/pods/41a53f60-6551-47cf-a063-02a42f9983e9/volumes"
Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.860665 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmlkz" event={"ID":"345c96d4-a84a-4d09-9d94-f68e4c3bff9b","Type":"ContainerStarted","Data":"a6c7e702fdf6fd42a58962286b4e81951b4f6c5e9fff05286a95c2d15d341835"}
Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.863209 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" event={"ID":"84a73fd9-b3ac-4f60-8577-46efd2bd5af2","Type":"ContainerStarted","Data":"89b7cfc50e72b20fdd68fb7f4b708fa9a87f228ebf89afb3695518a022a8de24"}
Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.863235 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" event={"ID":"84a73fd9-b3ac-4f60-8577-46efd2bd5af2","Type":"ContainerStarted","Data":"357ba6d3067ad603003a59ade0266e244fd9a32dc089be9b623d56866597b397"}
Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.864374 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"
Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.865442 4793 patch_prober.go:28] interesting pod/controller-manager-6c84bb8bf6-rnx9g container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" start-of-body=
Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.865470 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused"
Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.866166 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r" event={"ID":"f14a7294-c538-4d39-abf5-b286d7f08659","Type":"ContainerStarted","Data":"dce985d5f9bfd8c6b26001d1ceeb60e3f35b775483b9709ab3728e27963c8174"}
Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.866273 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"
Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.868385 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lcdjd" event={"ID":"50ff3901-4109-4f4e-9933-20bccf83d99d","Type":"ContainerStarted","Data":"a53da694149f5f95c0e5e85c152e8f829aba24dcb7c1acac97c30af331a73b63"}
Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.870214 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n2ntn" event={"ID":"0bcb229d-7351-4a1f-9a61-8c54a7ee039c","Type":"ContainerStarted","Data":"93debde36ad152a78d5a5e980c5a867675829b0ae7c0fda8786bdb7882b5ad77"}
Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.875539 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2lmqb" event={"ID":"f7506fff-3cb5-42dd-80c3-203b1354c70d","Type":"ContainerStarted","Data":"a087f85b6c2bb28a2484f4779411ca8ce7d91b9e49012a8e1da1c405f2caf8dc"}
Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.875660 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"
Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.877452 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tcslq"
event={"ID":"5493a5b8-666b-4e96-8912-e8ddc28327fe","Type":"ContainerStarted","Data":"6628af0c6148bca8f585541496e94f0e3e256f6329be0fdc93453d1d345d1b98"} Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.915857 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qmlkz" podStartSLOduration=5.357861607 podStartE2EDuration="1m26.91583947s" podCreationTimestamp="2026-01-27 20:05:31 +0000 UTC" firstStartedPulling="2026-01-27 20:05:34.931655147 +0000 UTC m=+160.321908303" lastFinishedPulling="2026-01-27 20:06:56.48963301 +0000 UTC m=+241.879886166" observedRunningTime="2026-01-27 20:06:57.887948162 +0000 UTC m=+243.278201338" watchObservedRunningTime="2026-01-27 20:06:57.91583947 +0000 UTC m=+243.306092626" Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.916356 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2lmqb" podStartSLOduration=4.403525711 podStartE2EDuration="1m23.916352471s" podCreationTimestamp="2026-01-27 20:05:34 +0000 UTC" firstStartedPulling="2026-01-27 20:05:37.139148065 +0000 UTC m=+162.529401221" lastFinishedPulling="2026-01-27 20:06:56.651974825 +0000 UTC m=+242.042227981" observedRunningTime="2026-01-27 20:06:57.914298489 +0000 UTC m=+243.304551645" watchObservedRunningTime="2026-01-27 20:06:57.916352471 +0000 UTC m=+243.306605617" Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.975743 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r" podStartSLOduration=8.975725637 podStartE2EDuration="8.975725637s" podCreationTimestamp="2026-01-27 20:06:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:06:57.947277499 +0000 UTC m=+243.337530655" watchObservedRunningTime="2026-01-27 20:06:57.975725637 +0000 UTC m=+243.365978793" Jan 27 20:06:57 crc kubenswrapper[4793]: I0127 20:06:57.976483 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tcslq" podStartSLOduration=5.744232125 podStartE2EDuration="1m25.976471472s" podCreationTimestamp="2026-01-27 20:05:32 +0000 UTC" firstStartedPulling="2026-01-27 20:05:36.003942337 +0000 UTC m=+161.394195493" lastFinishedPulling="2026-01-27 20:06:56.236181684 +0000 UTC m=+241.626434840" observedRunningTime="2026-01-27 20:06:57.975491453 +0000 UTC m=+243.365744629" watchObservedRunningTime="2026-01-27 20:06:57.976471472 +0000 UTC m=+243.366724628" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.005972 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" podStartSLOduration=9.005950032 podStartE2EDuration="9.005950032s" podCreationTimestamp="2026-01-27 20:06:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:06:58.004982502 +0000 UTC m=+243.395235668" watchObservedRunningTime="2026-01-27 20:06:58.005950032 +0000 UTC m=+243.396203188" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.024325 4793 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 27 20:06:58 crc kubenswrapper[4793]: E0127 20:06:58.025068 4793 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="1809bb2d-a0ed-4679-ab68-27db1963e044" containerName="registry-server" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.025100 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1809bb2d-a0ed-4679-ab68-27db1963e044" containerName="registry-server" Jan 27 20:06:58 crc kubenswrapper[4793]: E0127 20:06:58.025128 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41a53f60-6551-47cf-a063-02a42f9983e9" containerName="oauth-openshift" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.025138 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="41a53f60-6551-47cf-a063-02a42f9983e9" containerName="oauth-openshift" Jan 27 20:06:58 crc kubenswrapper[4793]: E0127 20:06:58.025147 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1809bb2d-a0ed-4679-ab68-27db1963e044" containerName="extract-utilities" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.025155 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1809bb2d-a0ed-4679-ab68-27db1963e044" containerName="extract-utilities" Jan 27 20:06:58 crc kubenswrapper[4793]: E0127 20:06:58.025167 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1809bb2d-a0ed-4679-ab68-27db1963e044" containerName="extract-content" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.025175 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1809bb2d-a0ed-4679-ab68-27db1963e044" containerName="extract-content" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.025360 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="41a53f60-6551-47cf-a063-02a42f9983e9" containerName="oauth-openshift" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.025381 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="1809bb2d-a0ed-4679-ab68-27db1963e044" containerName="registry-server" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.026072 4793 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.026327 4793 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.026515 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23" gracePeriod=15 Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.026753 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522" gracePeriod=15 Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.026827 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23" gracePeriod=15 Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.026802 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c" gracePeriod=15 Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.026907 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b" gracePeriod=15 Jan 27 20:06:58 crc kubenswrapper[4793]: E0127 20:06:58.027140 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.027158 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.027162 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: E0127 20:06:58.027171 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.027180 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 27 20:06:58 crc kubenswrapper[4793]: E0127 20:06:58.027193 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.027200 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 27 20:06:58 crc kubenswrapper[4793]: E0127 20:06:58.027212 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.027219 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 27 20:06:58 crc kubenswrapper[4793]: E0127 20:06:58.027230 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.027240 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 27 20:06:58 crc kubenswrapper[4793]: E0127 20:06:58.027253 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.027260 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 27 20:06:58 crc kubenswrapper[4793]: E0127 20:06:58.027269 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.027281 4793 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 27 20:06:58 crc kubenswrapper[4793]: E0127 20:06:58.027289 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.027296 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.027539 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.027578 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.027592 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.027605 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.027614 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.027622 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.027940 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.055917 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lcdjd" podStartSLOduration=6.568207385 podStartE2EDuration="1m28.055889939s" podCreationTimestamp="2026-01-27 20:05:30 +0000 UTC" firstStartedPulling="2026-01-27 20:05:35.000804603 +0000 UTC m=+160.391057769" lastFinishedPulling="2026-01-27 20:06:56.488487167 +0000 UTC m=+241.878740323" observedRunningTime="2026-01-27 20:06:58.047288468 +0000 UTC m=+243.437541624" watchObservedRunningTime="2026-01-27 20:06:58.055889939 +0000 UTC m=+243.446143095" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.079819 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-n2ntn" podStartSLOduration=4.305872974 podStartE2EDuration="1m28.079801987s" podCreationTimestamp="2026-01-27 20:05:30 +0000 UTC" firstStartedPulling="2026-01-27 20:05:32.715779239 +0000 UTC m=+158.106032405" lastFinishedPulling="2026-01-27 20:06:56.489708262 +0000 UTC m=+241.879961418" observedRunningTime="2026-01-27 20:06:58.074743557 +0000 UTC m=+243.464996703" watchObservedRunningTime="2026-01-27 20:06:58.079801987 +0000 UTC m=+243.470055143" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.082507 4793 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" 
podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Jan 27 20:06:58 crc kubenswrapper[4793]: E0127 20:06:58.092401 4793 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.238:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.209791 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.209865 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.209956 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.210045 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.210071 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.210095 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.210116 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.210187 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: 
\"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.310732 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.310801 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.310846 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.310870 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.310901 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.310909 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.310936 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.310954 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.310958 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.310992 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.310995 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.311026 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.311060 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.311063 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.311011 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.311078 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.393085 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:58 crc kubenswrapper[4793]: W0127 20:06:58.415647 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-dd80168af1e7465f6d8c17f1a65dbd4eb10ab7b37b37d7c57f52c9e1bb593fcf WatchSource:0}: Error finding container dd80168af1e7465f6d8c17f1a65dbd4eb10ab7b37b37d7c57f52c9e1bb593fcf: Status 404 returned error can't find the container with id dd80168af1e7465f6d8c17f1a65dbd4eb10ab7b37b37d7c57f52c9e1bb593fcf Jan 27 20:06:58 crc kubenswrapper[4793]: E0127 20:06:58.418466 4793 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.238:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188eaf45ea119087 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-27 20:06:58.417807495 +0000 UTC m=+243.808060651,LastTimestamp:2026-01-27 20:06:58.417807495 +0000 UTC m=+243.808060651,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.885934 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.887502 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.888309 4793 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23" exitCode=0 Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.888345 4793 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b" exitCode=0 Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.888355 4793 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522" exitCode=0 Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.888364 4793 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2e07a16b0aa00fcfaf5e5147009b1d6c1cfc5e4226d8c0bb71b7742c9bc3ba1c" exitCode=2 Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.888404 4793 scope.go:117] "RemoveContainer" containerID="c778234bae415e29213b65f04968887f36ee6b9ec47162946bbd71e15b32b8da" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 
20:06:58.890970 4793 generic.go:334] "Generic (PLEG): container finished" podID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" containerID="40c80c871df4be0b6009974de1f4e57324262b23d9497bb8381a5b474319f686" exitCode=0 Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.891037 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0","Type":"ContainerDied","Data":"40c80c871df4be0b6009974de1f4e57324262b23d9497bb8381a5b474319f686"} Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.891695 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.892082 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"dd80168af1e7465f6d8c17f1a65dbd4eb10ab7b37b37d7c57f52c9e1bb593fcf"} Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.899036 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.899600 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:06:58 crc kubenswrapper[4793]: I0127 20:06:58.899987 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:06:59 crc kubenswrapper[4793]: I0127 20:06:59.899364 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 27 20:06:59 crc kubenswrapper[4793]: I0127 20:06:59.901708 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"a48b2acf599910678953475595199d8907e6ea5f0eaf9dd9efe4650149da85ee"} Jan 27 20:06:59 crc kubenswrapper[4793]: E0127 20:06:59.902788 4793 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.238:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:06:59 crc kubenswrapper[4793]: I0127 20:06:59.902827 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection 
refused" Jan 27 20:06:59 crc kubenswrapper[4793]: I0127 20:06:59.903112 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.190849 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.191841 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.192148 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.337418 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-kubelet-dir\") pod \"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0\" (UID: \"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0\") " Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.337756 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-kube-api-access\") pod \"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0\" (UID: \"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0\") " Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.337579 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" (UID: "4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.337821 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-var-lock\") pod \"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0\" (UID: \"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0\") " Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.337954 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-var-lock" (OuterVolumeSpecName: "var-lock") pod "4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" (UID: "4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.338068 4793 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.338084 4793 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-var-lock\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.354306 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" (UID: "4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.439069 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.911465 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0","Type":"ContainerDied","Data":"27608e43d13c0bbe31b92c9bc1b28942a174002388b7e4b4d7063d9ac440fc06"} Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.911533 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27608e43d13c0bbe31b92c9bc1b28942a174002388b7e4b4d7063d9ac440fc06" Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.911570 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 27 20:07:00 crc kubenswrapper[4793]: E0127 20:07:00.912487 4793 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.238:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.925419 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:00 crc kubenswrapper[4793]: I0127 20:07:00.925910 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.167594 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-n2ntn" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.167633 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-n2ntn" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.220845 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-n2ntn" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.221328 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.221533 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.221850 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: E0127 20:07:01.324713 4793 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.238:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188eaf45ea119087 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-27 20:06:58.417807495 +0000 UTC m=+243.808060651,LastTimestamp:2026-01-27 20:06:58.417807495 +0000 UTC m=+243.808060651,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.587444 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jh9bl" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.589025 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jh9bl" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.646623 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jh9bl" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.647329 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.647785 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.648175 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.648505 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.732042 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qmlkz" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.732466 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qmlkz" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.757778 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lcdjd" Jan 27 20:07:01 crc 
kubenswrapper[4793]: I0127 20:07:01.757824 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lcdjd" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.779193 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qmlkz" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.779861 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.780414 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.781385 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.781669 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.781946 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.798459 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lcdjd" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.799626 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.799990 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.800222 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" 
pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.800452 4793 status_manager.go:851] "Failed to get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.800672 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.800889 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.922835 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.923941 4793 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23" exitCode=0 Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.961275 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jh9bl" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.961893 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.962354 4793 status_manager.go:851] "Failed to get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.962767 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.963107 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" 
pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.963477 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:01 crc kubenswrapper[4793]: I0127 20:07:01.963744 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.233893 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.234982 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.235621 4793 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.236071 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.236254 4793 status_manager.go:851] "Failed to get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.236395 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.236559 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.236712 4793 status_manager.go:851] "Failed to get 
status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.236876 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.377498 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.377637 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.377630 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.377669 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.377690 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.377876 4793 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.377895 4793 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.377857 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.478888 4793 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.937934 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.939068 4793 scope.go:117] "RemoveContainer" containerID="6dbdb20ce3c4a6bdaf1b1410df59d0429f12bf337797573b314a0c80b3bacc23" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.939506 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.951792 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.952260 4793 status_manager.go:851] "Failed to get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.952456 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.952638 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.952784 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.952928 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.953069 4793 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.957298 4793 scope.go:117] "RemoveContainer" containerID="2e8e5449ef647b5d1f5e201cd77dae4c951742918203c4547258115ff609011b" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.974582 4793 scope.go:117] "RemoveContainer" containerID="c29653b85b45342429c8220d1f835de778458e054bc095b302c436592268a522" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.986359 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qmlkz" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.986769 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.986992 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.987232 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.987507 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.987737 4793 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.988076 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 20:07:02.988574 4793 status_manager.go:851] "Failed to get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:02 crc kubenswrapper[4793]: I0127 
Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.012760 4793 scope.go:117] "RemoveContainer" containerID="697306b41b5ffcb7eb58f2960d5d518501e17db89fab6914e92c93e6995cbd23"
Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.040136 4793 scope.go:117] "RemoveContainer" containerID="c7a6ff6c323ea92e73074252068acea0c01f0b818072e5ca9f4f27bae7fb7de1"
Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.472355 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tcslq"
Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.474774 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tcslq"
Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.513386 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tcslq"
Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.513846 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.514213 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.514581 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.514862 4793 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.515148 4793 status_manager.go:851] "Failed to get status for pod" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" pod="openshift-marketplace/redhat-marketplace-tcslq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-tcslq\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.515435 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.515682 4793 status_manager.go:851] "Failed to get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused"
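The SyncLoop (probe) lines for redhat-marketplace-tcslq show the startup probe acting as a gate: the pod first reports startup "unhealthy", its readiness result stays empty, and only after startup flips to "started" does a readiness result count (it turns "ready" shortly afterwards, below). A compact sketch of that gating, with the kubelet's probe workers, thresholds, and timers deliberately left out; the types are invented for the sketch.

package main

import "fmt"

// podProbeState sketches how a startup probe gates the readiness probe.
type podProbeState struct {
	started bool // startup probe has succeeded at least once
	ready   bool
}

func (s *podProbeState) onStartupResult(ok bool) string {
	if ok && !s.started {
		s.started = true
		return `probe="startup" status="started"`
	}
	if !ok {
		return `probe="startup" status="unhealthy"`
	}
	return `probe="startup" status="started"` // already started; no transition
}

func (s *podProbeState) onReadinessResult(ok bool) string {
	if !s.started {
		return `probe="readiness" status=""` // readiness ignored until started
	}
	s.ready = ok
	if ok {
		return `probe="readiness" status="ready"`
	}
	return `probe="readiness" status="unhealthy"`
}

func main() {
	var s podProbeState
	fmt.Println(s.onStartupResult(false))  // unhealthy, as at 20:07:03.472355
	fmt.Println(s.onReadinessResult(true)) // "", gated, as at 20:07:03.474774
	fmt.Println(s.onStartupResult(true))   // started, as at 20:07:03.513386
	fmt.Println(s.onReadinessResult(true)) // ready, as at 20:07:03.991990
}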
get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.515966 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.809878 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.948335 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-controller-manager_controller-manager-6c84bb8bf6-rnx9g_84a73fd9-b3ac-4f60-8577-46efd2bd5af2/controller-manager/0.log" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.948391 4793 generic.go:334] "Generic (PLEG): container finished" podID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" containerID="89b7cfc50e72b20fdd68fb7f4b708fa9a87f228ebf89afb3695518a022a8de24" exitCode=255 Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.948726 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" event={"ID":"84a73fd9-b3ac-4f60-8577-46efd2bd5af2","Type":"ContainerDied","Data":"89b7cfc50e72b20fdd68fb7f4b708fa9a87f228ebf89afb3695518a022a8de24"} Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.949338 4793 scope.go:117] "RemoveContainer" containerID="89b7cfc50e72b20fdd68fb7f4b708fa9a87f228ebf89afb3695518a022a8de24" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.949734 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.950082 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.950337 4793 status_manager.go:851] "Failed to get status for pod" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" pod="openshift-marketplace/redhat-marketplace-tcslq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-tcslq\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.950579 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 
38.102.83.238:6443: connect: connection refused" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.951269 4793 status_manager.go:851] "Failed to get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.951874 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.952072 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.991990 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tcslq" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.992647 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.993062 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.993277 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.993458 4793 status_manager.go:851] "Failed to get status for pod" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" pod="openshift-marketplace/redhat-marketplace-tcslq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-tcslq\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.993702 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.993854 4793 status_manager.go:851] "Failed to get status for pod" 
podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:03 crc kubenswrapper[4793]: I0127 20:07:03.994013 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.781528 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.781904 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.894813 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2lmqb" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.894867 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2lmqb" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.937670 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2lmqb" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.938240 4793 status_manager.go:851] "Failed to get status for pod" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" pod="openshift-marketplace/redhat-marketplace-tcslq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-tcslq\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.938570 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.938825 4793 status_manager.go:851] "Failed to get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.939069 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.939303 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.939527 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.939778 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.940014 4793 status_manager.go:851] "Failed to get status for pod" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" pod="openshift-marketplace/redhat-operators-2lmqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-2lmqb\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.954058 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-controller-manager_controller-manager-6c84bb8bf6-rnx9g_84a73fd9-b3ac-4f60-8577-46efd2bd5af2/controller-manager/0.log" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.954116 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" event={"ID":"84a73fd9-b3ac-4f60-8577-46efd2bd5af2","Type":"ContainerStarted","Data":"2e0a2c3fcdd3ec0b4ff24b0f92b789f9d01e78188166e09f5a18f89206947e37"} Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.955044 4793 status_manager.go:851] "Failed to get status for pod" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" pod="openshift-marketplace/redhat-marketplace-tcslq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-tcslq\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.955496 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.955714 4793 status_manager.go:851] "Failed to get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.955928 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: 
connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.956249 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.956525 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.956775 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.957104 4793 status_manager.go:851] "Failed to get status for pod" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" pod="openshift-marketplace/redhat-operators-2lmqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-2lmqb\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.993490 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2lmqb" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.994505 4793 status_manager.go:851] "Failed to get status for pod" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" pod="openshift-marketplace/redhat-marketplace-tcslq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-tcslq\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.994959 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.995364 4793 status_manager.go:851] "Failed to get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.995713 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.996001 4793 status_manager.go:851] "Failed to get status for pod" 
podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.996267 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.996586 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:04 crc kubenswrapper[4793]: I0127 20:07:04.996896 4793 status_manager.go:851] "Failed to get status for pod" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" pod="openshift-marketplace/redhat-operators-2lmqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-2lmqb\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:05 crc kubenswrapper[4793]: I0127 20:07:05.806795 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:05 crc kubenswrapper[4793]: I0127 20:07:05.807083 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:05 crc kubenswrapper[4793]: I0127 20:07:05.807322 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:05 crc kubenswrapper[4793]: I0127 20:07:05.807616 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:05 crc kubenswrapper[4793]: I0127 20:07:05.807917 4793 status_manager.go:851] "Failed to get status for pod" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" pod="openshift-marketplace/redhat-operators-2lmqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-2lmqb\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:05 crc kubenswrapper[4793]: I0127 20:07:05.808161 4793 status_manager.go:851] "Failed to get status for pod" 
podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" pod="openshift-marketplace/redhat-marketplace-tcslq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-tcslq\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:05 crc kubenswrapper[4793]: I0127 20:07:05.808402 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:05 crc kubenswrapper[4793]: I0127 20:07:05.808715 4793 status_manager.go:851] "Failed to get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:05 crc kubenswrapper[4793]: I0127 20:07:05.960843 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" Jan 27 20:07:05 crc kubenswrapper[4793]: I0127 20:07:05.967509 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" Jan 27 20:07:05 crc kubenswrapper[4793]: I0127 20:07:05.968425 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:05 crc kubenswrapper[4793]: I0127 20:07:05.969220 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:05 crc kubenswrapper[4793]: I0127 20:07:05.970842 4793 status_manager.go:851] "Failed to get status for pod" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" pod="openshift-marketplace/redhat-operators-2lmqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-2lmqb\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:05 crc kubenswrapper[4793]: I0127 20:07:05.971322 4793 status_manager.go:851] "Failed to get status for pod" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" pod="openshift-marketplace/redhat-marketplace-tcslq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-tcslq\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:05 crc kubenswrapper[4793]: I0127 20:07:05.971956 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:05 crc 
kubenswrapper[4793]: I0127 20:07:05.972442 4793 status_manager.go:851] "Failed to get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:06 crc kubenswrapper[4793]: I0127 20:07:06.019537 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:06 crc kubenswrapper[4793]: I0127 20:07:06.020770 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:07 crc kubenswrapper[4793]: E0127 20:07:07.483771 4793 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:07 crc kubenswrapper[4793]: E0127 20:07:07.484770 4793 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:07 crc kubenswrapper[4793]: E0127 20:07:07.485513 4793 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:07 crc kubenswrapper[4793]: E0127 20:07:07.488154 4793 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:07 crc kubenswrapper[4793]: E0127 20:07:07.489321 4793 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:07 crc kubenswrapper[4793]: I0127 20:07:07.489383 4793 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease"
Jan 27 20:07:07 crc kubenswrapper[4793]: E0127 20:07:07.490008 4793 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.238:6443: connect: connection refused" interval="200ms"
Jan 27 20:07:07 crc kubenswrapper[4793]: E0127 20:07:07.691517 4793 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.238:6443: connect: connection refused" interval="400ms"
Jan 27 20:07:08 crc kubenswrapper[4793]: E0127 20:07:08.092953 4793 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.238:6443: connect: connection refused" interval="800ms"
Jan 27 20:07:08 crc kubenswrapper[4793]: E0127 20:07:08.893716 4793 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.238:6443: connect: connection refused" interval="1.6s"
Jan 27 20:07:10 crc kubenswrapper[4793]: E0127 20:07:10.495366 4793 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.238:6443: connect: connection refused" interval="3.2s"
Jan 27 20:07:10 crc kubenswrapper[4793]: I0127 20:07:10.802302 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 27 20:07:10 crc kubenswrapper[4793]: I0127 20:07:10.803374 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:10 crc kubenswrapper[4793]: I0127 20:07:10.804503 4793 status_manager.go:851] "Failed to get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:10 crc kubenswrapper[4793]: I0127 20:07:10.805040 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:10 crc kubenswrapper[4793]: I0127 20:07:10.805458 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:10 crc kubenswrapper[4793]: I0127 20:07:10.805902 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:10 crc kubenswrapper[4793]: I0127 20:07:10.806284 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:10 crc kubenswrapper[4793]: I0127 20:07:10.806757 4793 status_manager.go:851] "Failed to get status for pod" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" pod="openshift-marketplace/redhat-operators-2lmqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-2lmqb\": dial tcp 38.102.83.238:6443: connect: connection refused"
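The lease controller gives the clearest picture of the kubelet's retry policy: five consecutive Put failures trigger the fallback to ensure-lease, and each ensure attempt doubles the retry interval, 200ms, 400ms, 800ms, 1.6s, 3.2s, which matches the gaps between the controller.go:145 timestamps above. A sketch of that doubling backoff; the 200ms base is taken from the log, while the 7s cap and the closure-based "try" are assumptions of the sketch.

package main

import (
	"fmt"
	"time"
)

// ensureLeaseWithBackoff retries until success, doubling the wait between
// attempts up to a cap, mirroring the interval="..." values in the log.
func ensureLeaseWithBackoff(try func() error) {
	interval := 200 * time.Millisecond
	const maxInterval = 7 * time.Second // assumed cap for the sketch
	for {
		err := try()
		if err == nil {
			return
		}
		fmt.Printf("\"Failed to ensure lease exists, will retry\" err=%q interval=%q\n", err.Error(), interval)
		time.Sleep(interval)
		if interval *= 2; interval > maxInterval {
			interval = maxInterval
		}
	}
}

func main() {
	attempts := 0
	ensureLeaseWithBackoff(func() error {
		attempts++
		if attempts <= 5 { // fails as often as the log shows
			return fmt.Errorf("dial tcp 38.102.83.238:6443: connect: connection refused")
		}
		return nil // apiserver back; lease ensured
	})
}

Logging the next interval before sleeping reproduces the observed sequence exactly: the entry stamped interval="400ms" arrives roughly 200ms after the one stamped interval="200ms", and so on.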
Jan 27 20:07:10 crc kubenswrapper[4793]: I0127 20:07:10.807685 4793 status_manager.go:851] "Failed to get status for pod" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" pod="openshift-marketplace/redhat-marketplace-tcslq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-tcslq\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:10 crc kubenswrapper[4793]: I0127 20:07:10.821882 4793 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4"
Jan 27 20:07:10 crc kubenswrapper[4793]: I0127 20:07:10.821922 4793 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4"
Jan 27 20:07:10 crc kubenswrapper[4793]: E0127 20:07:10.822482 4793 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 27 20:07:10 crc kubenswrapper[4793]: I0127 20:07:10.823360 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 27 20:07:10 crc kubenswrapper[4793]: W0127 20:07:10.864177 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-257171f18dc0f9bdd26def196012be3acc984b6318295bba1f07b8d79ed10ddf WatchSource:0}: Error finding container 257171f18dc0f9bdd26def196012be3acc984b6318295bba1f07b8d79ed10ddf: Status 404 returned error can't find the container with id 257171f18dc0f9bdd26def196012be3acc984b6318295bba1f07b8d79ed10ddf
Jan 27 20:07:10 crc kubenswrapper[4793]: I0127 20:07:10.988539 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"257171f18dc0f9bdd26def196012be3acc984b6318295bba1f07b8d79ed10ddf"}
Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.215751 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-n2ntn"
Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.216473 4793 status_manager.go:851] "Failed to get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused"
Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.216912 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused"
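kube-apiserver-crc is a static pod, run directly from an on-disk manifest; the API object other components see is a mirror pod the kubelet creates purely for visibility. After the manifest was replaced, the kubelet tries to delete the stale mirror (UID dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4) before registering a fresh one, and the mirror_client.go:138 failure is harmless while the apiserver is down: the static pod keeps running regardless, and the deletion is retried on a later sync, as the repeat at 20:07:12 below shows. A hedged sketch of that flow; the client type and its method are invented for illustration, not the kubelet's real mirror client.

package main

import (
	"errors"
	"fmt"
)

type client struct{ apiserverUp bool }

// deleteMirrorPod stands in for the DELETE call to the API server; here it
// fails the same way the log does while the apiserver is unreachable.
func (c *client) deleteMirrorPod(name, uid string) error {
	if !c.apiserverUp {
		return errors.New("dial tcp 38.102.83.238:6443: connect: connection refused")
	}
	return nil
}

func syncStaticPod(c *client, name, staleMirrorUID string) {
	fmt.Printf("Trying to delete pod %s podUID=%s\n", name, staleMirrorUID)
	if err := c.deleteMirrorPod(name, staleMirrorUID); err != nil {
		// Not fatal: the static pod runs from its manifest either way; the
		// stale mirror is cleaned up on a later sync once the apiserver is back.
		fmt.Println("Failed deleting a mirror pod:", err)
		return
	}
	fmt.Println("mirror pod deleted; a new mirror will be created")
}

func main() {
	syncStaticPod(&client{apiserverUp: false},
		"openshift-kube-apiserver/kube-apiserver-crc",
		"dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4")
}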
connection refused" Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.217230 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.217712 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.217992 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.218295 4793 status_manager.go:851] "Failed to get status for pod" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" pod="openshift-marketplace/redhat-operators-2lmqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-2lmqb\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.218742 4793 status_manager.go:851] "Failed to get status for pod" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" pod="openshift-marketplace/redhat-marketplace-tcslq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-tcslq\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.219067 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:11 crc kubenswrapper[4793]: E0127 20:07:11.326747 4793 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.238:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188eaf45ea119087 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-27 20:06:58.417807495 +0000 UTC m=+243.808060651,LastTimestamp:2026-01-27 20:06:58.417807495 +0000 UTC m=+243.808060651,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 
UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.784067 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lcdjd" Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.785365 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.786181 4793 status_manager.go:851] "Failed to get status for pod" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" pod="openshift-marketplace/redhat-operators-2lmqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-2lmqb\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.787056 4793 status_manager.go:851] "Failed to get status for pod" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" pod="openshift-marketplace/redhat-marketplace-tcslq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-tcslq\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.787723 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.789094 4793 status_manager.go:851] "Failed to get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.789758 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.790233 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.792604 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.999300 4793 
generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="528dc8397a83403e20ed4f87548f0efeadf6e0487cbd6c7151c39ff50d4baf71" exitCode=0 Jan 27 20:07:11 crc kubenswrapper[4793]: I0127 20:07:11.999342 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"528dc8397a83403e20ed4f87548f0efeadf6e0487cbd6c7151c39ff50d4baf71"} Jan 27 20:07:12 crc kubenswrapper[4793]: I0127 20:07:11.999972 4793 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4" Jan 27 20:07:12 crc kubenswrapper[4793]: I0127 20:07:12.000048 4793 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4" Jan 27 20:07:12 crc kubenswrapper[4793]: I0127 20:07:12.000308 4793 status_manager.go:851] "Failed to get status for pod" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:12 crc kubenswrapper[4793]: E0127 20:07:12.000613 4793 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.238:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:07:12 crc kubenswrapper[4793]: I0127 20:07:12.000716 4793 status_manager.go:851] "Failed to get status for pod" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" pod="openshift-marketplace/certified-operators-qmlkz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-qmlkz\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:12 crc kubenswrapper[4793]: I0127 20:07:12.001133 4793 status_manager.go:851] "Failed to get status for pod" podUID="18834635-b900-480e-844b-4c075b169d4a" pod="openshift-marketplace/community-operators-jh9bl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jh9bl\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:12 crc kubenswrapper[4793]: I0127 20:07:12.001419 4793 status_manager.go:851] "Failed to get status for pod" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" pod="openshift-marketplace/redhat-operators-2lmqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-2lmqb\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:12 crc kubenswrapper[4793]: I0127 20:07:12.001837 4793 status_manager.go:851] "Failed to get status for pod" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" pod="openshift-marketplace/redhat-marketplace-tcslq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-tcslq\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:12 crc kubenswrapper[4793]: I0127 20:07:12.002992 4793 status_manager.go:851] "Failed to get status for pod" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-6c84bb8bf6-rnx9g\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:12 crc kubenswrapper[4793]: I0127 20:07:12.003383 4793 status_manager.go:851] "Failed to get status for pod" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" pod="openshift-marketplace/certified-operators-lcdjd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lcdjd\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:12 crc kubenswrapper[4793]: I0127 20:07:12.004010 4793 status_manager.go:851] "Failed to get status for pod" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" pod="openshift-marketplace/community-operators-n2ntn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-n2ntn\": dial tcp 38.102.83.238:6443: connect: connection refused" Jan 27 20:07:13 crc kubenswrapper[4793]: I0127 20:07:13.027010 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2a3a069f3e89479c5b63296e42a61937d19dd78242e795400f62180b81ec5872"} Jan 27 20:07:13 crc kubenswrapper[4793]: I0127 20:07:13.027367 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e4123816361eba4151a7eb40bff5b0aacbe9fbf4052872d8224f8d0590fefd09"} Jan 27 20:07:13 crc kubenswrapper[4793]: I0127 20:07:13.027406 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7181dec4c29382963d6417c64804f66578572e029cebe669856812f2680994bd"} Jan 27 20:07:13 crc kubenswrapper[4793]: I0127 20:07:13.031801 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 27 20:07:13 crc kubenswrapper[4793]: I0127 20:07:13.031847 4793 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd" exitCode=1 Jan 27 20:07:13 crc kubenswrapper[4793]: I0127 20:07:13.031871 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd"} Jan 27 20:07:13 crc kubenswrapper[4793]: I0127 20:07:13.032293 4793 scope.go:117] "RemoveContainer" containerID="2ad2717befff7ff17c762d2f3fa551521c0777347507eb73b370d66da5aafbbd" Jan 27 20:07:13 crc kubenswrapper[4793]: I0127 20:07:13.883011 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:07:14 crc kubenswrapper[4793]: I0127 20:07:14.040191 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"205306ce9ff0d2b37a590141b5d8ddd3f541816e6c55639345ac02e6ca6fd199"} Jan 27 20:07:14 crc kubenswrapper[4793]: I0127 20:07:14.040234 4793 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"fca5ff15c7d73414ce143b2fd413835a314546643ea4eb00b668dd4271332bc1"} Jan 27 20:07:14 crc kubenswrapper[4793]: I0127 20:07:14.040499 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:07:14 crc kubenswrapper[4793]: I0127 20:07:14.040780 4793 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4" Jan 27 20:07:14 crc kubenswrapper[4793]: I0127 20:07:14.040831 4793 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4" Jan 27 20:07:14 crc kubenswrapper[4793]: I0127 20:07:14.043398 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 27 20:07:14 crc kubenswrapper[4793]: I0127 20:07:14.043432 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f00b1f3fbcc57941f87c07ebf8209d1ff9561ce9f1598a7e00ae68f05db5a5dc"} Jan 27 20:07:15 crc kubenswrapper[4793]: I0127 20:07:15.823438 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:07:15 crc kubenswrapper[4793]: I0127 20:07:15.823801 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:07:15 crc kubenswrapper[4793]: I0127 20:07:15.829955 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:07:19 crc kubenswrapper[4793]: I0127 20:07:19.056686 4793 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:07:19 crc kubenswrapper[4793]: I0127 20:07:19.067095 4793 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="f895127b-a376-44d3-a251-fa20f3314d46" Jan 27 20:07:19 crc kubenswrapper[4793]: I0127 20:07:19.076241 4793 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4" Jan 27 20:07:19 crc kubenswrapper[4793]: I0127 20:07:19.076280 4793 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4" Jan 27 20:07:19 crc kubenswrapper[4793]: I0127 20:07:19.078854 4793 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="f895127b-a376-44d3-a251-fa20f3314d46" Jan 27 20:07:19 crc kubenswrapper[4793]: I0127 20:07:19.080717 4793 status_manager.go:308] "Container readiness changed before pod has synced" pod="openshift-kube-apiserver/kube-apiserver-crc" containerID="cri-o://7181dec4c29382963d6417c64804f66578572e029cebe669856812f2680994bd" Jan 27 20:07:19 crc kubenswrapper[4793]: I0127 20:07:19.080749 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:07:20 crc kubenswrapper[4793]: I0127 20:07:20.081655 4793 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4" Jan 27 20:07:20 crc kubenswrapper[4793]: I0127 20:07:20.081699 4793 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4" Jan 27 20:07:20 crc kubenswrapper[4793]: I0127 20:07:20.085418 4793 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="f895127b-a376-44d3-a251-fa20f3314d46" Jan 27 20:07:22 crc kubenswrapper[4793]: I0127 20:07:22.661568 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:07:22 crc kubenswrapper[4793]: I0127 20:07:22.668950 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:07:23 crc kubenswrapper[4793]: I0127 20:07:23.095985 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:07:23 crc kubenswrapper[4793]: I0127 20:07:23.099876 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 20:07:29 crc kubenswrapper[4793]: I0127 20:07:29.057397 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 27 20:07:29 crc kubenswrapper[4793]: I0127 20:07:29.145192 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 27 20:07:29 crc kubenswrapper[4793]: I0127 20:07:29.622530 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 27 20:07:29 crc kubenswrapper[4793]: I0127 20:07:29.864102 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 27 20:07:30 crc kubenswrapper[4793]: I0127 20:07:30.153877 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 27 20:07:30 crc kubenswrapper[4793]: I0127 20:07:30.201913 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 27 20:07:30 crc kubenswrapper[4793]: I0127 20:07:30.204758 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 27 20:07:30 crc kubenswrapper[4793]: I0127 20:07:30.214759 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 27 20:07:30 crc kubenswrapper[4793]: I0127 20:07:30.265907 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 27 20:07:30 crc kubenswrapper[4793]: I0127 20:07:30.378085 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 27 20:07:30 crc kubenswrapper[4793]: I0127 
20:07:30.379770 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 27 20:07:30 crc kubenswrapper[4793]: I0127 20:07:30.443756 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 27 20:07:30 crc kubenswrapper[4793]: I0127 20:07:30.604035 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 27 20:07:30 crc kubenswrapper[4793]: I0127 20:07:30.781455 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 27 20:07:30 crc kubenswrapper[4793]: I0127 20:07:30.994155 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 27 20:07:31 crc kubenswrapper[4793]: I0127 20:07:31.000666 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 27 20:07:31 crc kubenswrapper[4793]: I0127 20:07:31.154143 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 27 20:07:31 crc kubenswrapper[4793]: I0127 20:07:31.160003 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 27 20:07:31 crc kubenswrapper[4793]: I0127 20:07:31.212447 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 27 20:07:31 crc kubenswrapper[4793]: I0127 20:07:31.239338 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 27 20:07:31 crc kubenswrapper[4793]: I0127 20:07:31.266008 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 27 20:07:31 crc kubenswrapper[4793]: I0127 20:07:31.301713 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 27 20:07:31 crc kubenswrapper[4793]: I0127 20:07:31.630824 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 27 20:07:31 crc kubenswrapper[4793]: I0127 20:07:31.653985 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 27 20:07:31 crc kubenswrapper[4793]: I0127 20:07:31.712354 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 27 20:07:31 crc kubenswrapper[4793]: I0127 20:07:31.737845 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 27 20:07:31 crc kubenswrapper[4793]: I0127 20:07:31.797092 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 27 20:07:31 crc kubenswrapper[4793]: I0127 20:07:31.918626 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 27 20:07:32 crc kubenswrapper[4793]: I0127 20:07:32.042343 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 27 20:07:32 crc 
kubenswrapper[4793]: I0127 20:07:32.093407 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 27 20:07:32 crc kubenswrapper[4793]: I0127 20:07:32.167332 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 27 20:07:32 crc kubenswrapper[4793]: I0127 20:07:32.208778 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 27 20:07:32 crc kubenswrapper[4793]: I0127 20:07:32.345709 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 27 20:07:32 crc kubenswrapper[4793]: I0127 20:07:32.799026 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 27 20:07:32 crc kubenswrapper[4793]: I0127 20:07:32.821809 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 27 20:07:32 crc kubenswrapper[4793]: I0127 20:07:32.967401 4793 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 27 20:07:32 crc kubenswrapper[4793]: I0127 20:07:32.967500 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 27 20:07:32 crc kubenswrapper[4793]: I0127 20:07:32.972046 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.009058 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.035361 4793 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.075107 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.097441 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.106433 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.126500 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.126598 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.173426 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.225114 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.262072 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 27 20:07:33 crc 
kubenswrapper[4793]: I0127 20:07:33.263295 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.291477 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.362472 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.438245 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.465972 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.559442 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.650476 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.688350 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.703600 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.797524 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.843078 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.950314 4793 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.954790 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.954843 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65"] Jan 27 20:07:33 crc kubenswrapper[4793]: E0127 20:07:33.955056 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" containerName="installer" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.955081 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" containerName="installer" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.955200 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cfe0f42-1b14-48ec-ab8a-3bccfe285fc0" containerName="installer" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.955236 4793 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.955257 4793 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="dcf7a55a-43c6-4c3a-b16b-8a57e7fb54e4" Jan 27 
20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.955633 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.958814 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.958890 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.959900 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.959967 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.960127 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.960189 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.960330 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.960438 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.960483 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.961777 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.963037 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.966294 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.972154 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.972416 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.979671 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 27 20:07:33 crc kubenswrapper[4793]: I0127 20:07:33.985610 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.000193 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=15.000149097 podStartE2EDuration="15.000149097s" podCreationTimestamp="2026-01-27 20:07:19 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:07:33.998932389 +0000 UTC m=+279.389185555" watchObservedRunningTime="2026-01-27 20:07:34.000149097 +0000 UTC m=+279.390402253" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.062673 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.075393 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.080522 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-router-certs\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.080617 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.080717 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.080804 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.080857 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-user-template-error\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.080938 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/083642d1-121a-44c0-952e-63b1fed1fe9e-audit-policies\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.081007 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.081109 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.081143 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-session\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.081191 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.081271 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-user-template-login\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.081400 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/083642d1-121a-44c0-952e-63b1fed1fe9e-audit-dir\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.081468 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twqn6\" (UniqueName: \"kubernetes.io/projected/083642d1-121a-44c0-952e-63b1fed1fe9e-kube-api-access-twqn6\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.081513 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-service-ca\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.113681 
4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.117777 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.128596 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.182975 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.183027 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.183055 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.183096 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-user-template-error\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.183132 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.183157 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/083642d1-121a-44c0-952e-63b1fed1fe9e-audit-policies\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.183190 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " 
pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.183240 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-session\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.183268 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.183295 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-user-template-login\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.183317 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/083642d1-121a-44c0-952e-63b1fed1fe9e-audit-dir\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.183339 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twqn6\" (UniqueName: \"kubernetes.io/projected/083642d1-121a-44c0-952e-63b1fed1fe9e-kube-api-access-twqn6\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.183366 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-service-ca\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.183397 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-router-certs\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.183626 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/083642d1-121a-44c0-952e-63b1fed1fe9e-audit-dir\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.183974 4793 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/083642d1-121a-44c0-952e-63b1fed1fe9e-audit-policies\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.184002 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.184968 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-service-ca\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.185257 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.188643 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.188815 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.188932 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-user-template-login\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.189593 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-session\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.190135 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-user-template-error\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.190661 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.191037 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-router-certs\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.192463 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/083642d1-121a-44c0-952e-63b1fed1fe9e-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.199876 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twqn6\" (UniqueName: \"kubernetes.io/projected/083642d1-121a-44c0-952e-63b1fed1fe9e-kube-api-access-twqn6\") pod \"oauth-openshift-6cbd7f6dbc-whk65\" (UID: \"083642d1-121a-44c0-952e-63b1fed1fe9e\") " pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.276533 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.376269 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.420486 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.435401 4793 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.458622 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.475905 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.566583 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.696531 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.749902 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.857020 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 27 20:07:34 crc kubenswrapper[4793]: I0127 20:07:34.986774 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 27 20:07:35 crc kubenswrapper[4793]: I0127 20:07:35.055595 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 27 20:07:35 crc kubenswrapper[4793]: I0127 20:07:35.114097 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 27 20:07:35 crc kubenswrapper[4793]: I0127 20:07:35.120922 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 27 20:07:35 crc kubenswrapper[4793]: I0127 20:07:35.168917 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 27 20:07:35 crc kubenswrapper[4793]: I0127 20:07:35.226489 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 27 20:07:35 crc kubenswrapper[4793]: I0127 20:07:35.355157 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 27 20:07:35 crc kubenswrapper[4793]: I0127 20:07:35.367292 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 27 20:07:35 crc kubenswrapper[4793]: I0127 20:07:35.432202 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 27 20:07:35 crc kubenswrapper[4793]: I0127 20:07:35.457916 4793 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 27 20:07:35 crc kubenswrapper[4793]: I0127 20:07:35.540241 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 27 20:07:35 crc kubenswrapper[4793]: I0127 20:07:35.626668 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 27 20:07:35 crc kubenswrapper[4793]: I0127 20:07:35.758079 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 27 20:07:35 crc kubenswrapper[4793]: I0127 20:07:35.824877 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 27 20:07:35 crc kubenswrapper[4793]: I0127 20:07:35.949080 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 27 20:07:35 crc kubenswrapper[4793]: I0127 20:07:35.973897 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 27 20:07:35 crc kubenswrapper[4793]: I0127 20:07:35.986495 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.016062 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.023130 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.150352 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.214284 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.282137 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.395442 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.459615 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.538254 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.620313 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.646224 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.784152 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.834652 4793 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.845750 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.849158 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.872538 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.938152 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 27 20:07:36 crc kubenswrapper[4793]: I0127 20:07:36.997621 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.117722 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.120369 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.128612 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.137150 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.168186 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.270348 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.303064 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.315233 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.350815 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.357361 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.367535 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.430385 4793 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.455734 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" 
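The long run of reflector.go:368 "Caches populated" entries above is the kubelet re-syncing one informer cache per referenced Secret and ConfigMap once the kube-apiserver at api-int.crc.testing:6443 became reachable again. Below is a minimal, illustrative Go sketch for tallying those entries by source namespace; it is an assumed standalone helper, not part of the kubelet or this job's tooling, and it reads the decompressed kubelet log from stdin.

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"sort"
)

func main() {
	// Matches entries like:
	//   reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
	// Entries sourced from k8s.io/client-go/informers/factory.go carry no object-"..." part and are skipped.
	re := regexp.MustCompile(`Caches populated for \*v1\.(\w+) from object-"([^"]+)"`)
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // kubelet lines can exceed the default 64 KiB scanner limit
	for sc.Scan() {
		if m := re.FindStringSubmatch(sc.Text()); m != nil {
			counts[m[2]]++ // m[1] is the kind (Secret/ConfigMap), m[2] the source namespace
		}
	}
	namespaces := make([]string, 0, len(counts))
	for ns := range counts {
		namespaces = append(namespaces, ns)
	}
	sort.Strings(namespaces)
	for _, ns := range namespaces {
		fmt.Printf("%-50s %d\n", ns, counts[ns])
	}
}

Piping the decompressed log through this (go run with the file on stdin) shows which namespaces dominate the post-restart resync, e.g. openshift-authentication and the various operator namespaces seen above.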
Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.459229 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.550987 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.756257 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.773002 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.865473 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.937168 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.947537 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 27 20:07:37 crc kubenswrapper[4793]: I0127 20:07:37.953827 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 27 20:07:38 crc kubenswrapper[4793]: I0127 20:07:38.021887 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 27 20:07:38 crc kubenswrapper[4793]: I0127 20:07:38.064969 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 27 20:07:38 crc kubenswrapper[4793]: I0127 20:07:38.190126 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 27 20:07:38 crc kubenswrapper[4793]: I0127 20:07:38.197655 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 27 20:07:38 crc kubenswrapper[4793]: I0127 20:07:38.200068 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 27 20:07:38 crc kubenswrapper[4793]: I0127 20:07:38.217080 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 27 20:07:38 crc kubenswrapper[4793]: I0127 20:07:38.332329 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 27 20:07:38 crc kubenswrapper[4793]: I0127 20:07:38.357932 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 27 20:07:38 crc kubenswrapper[4793]: I0127 20:07:38.484564 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 27 20:07:38 crc kubenswrapper[4793]: I0127 20:07:38.553695 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 27 20:07:38 crc kubenswrapper[4793]: I0127 20:07:38.603222 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 27 20:07:38 crc 
kubenswrapper[4793]: I0127 20:07:38.691969 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 27 20:07:38 crc kubenswrapper[4793]: I0127 20:07:38.692664 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 27 20:07:38 crc kubenswrapper[4793]: I0127 20:07:38.709813 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 27 20:07:38 crc kubenswrapper[4793]: I0127 20:07:38.738285 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 27 20:07:38 crc kubenswrapper[4793]: I0127 20:07:38.769387 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 27 20:07:38 crc kubenswrapper[4793]: I0127 20:07:38.992394 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.113161 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.117122 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.117755 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.264665 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.265573 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.310362 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.423769 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.429490 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.430313 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.452683 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.474213 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.522484 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.594514 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65"] Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.604433 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.795756 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65"] Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.810644 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.818627 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.847595 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.860237 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.932011 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.974988 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 27 20:07:39 crc kubenswrapper[4793]: I0127 20:07:39.977541 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.007092 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.013160 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.115346 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.133560 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.185855 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.193684 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" event={"ID":"083642d1-121a-44c0-952e-63b1fed1fe9e","Type":"ContainerStarted","Data":"d8d3f8fa90a220741de1559bb00254f87809d03bdf45ccdd6a4a8d6403d9e152"} Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.193808 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" event={"ID":"083642d1-121a-44c0-952e-63b1fed1fe9e","Type":"ContainerStarted","Data":"510e476e690726d28659aa1e607ffb4dfcf97985b43ba28131f3d6cc46845439"} Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.196724 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.197474 4793 patch_prober.go:28] interesting pod/oauth-openshift-6cbd7f6dbc-whk65 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.62:6443/healthz\": dial tcp 10.217.0.62:6443: connect: connection refused" start-of-body= Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.197636 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" podUID="083642d1-121a-44c0-952e-63b1fed1fe9e" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.62:6443/healthz\": dial tcp 10.217.0.62:6443: connect: connection refused" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.211735 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.221304 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" podStartSLOduration=73.221273038 podStartE2EDuration="1m13.221273038s" podCreationTimestamp="2026-01-27 20:06:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:07:40.216887617 +0000 UTC m=+285.607140773" watchObservedRunningTime="2026-01-27 20:07:40.221273038 +0000 UTC m=+285.611526194" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.346217 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.358567 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.366590 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.373332 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.389306 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.390029 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.565970 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.679807 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.685217 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.772444 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 27 20:07:40 crc kubenswrapper[4793]: I0127 20:07:40.868767 4793 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.030682 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.161280 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.208078 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-6cbd7f6dbc-whk65_083642d1-121a-44c0-952e-63b1fed1fe9e/oauth-openshift/0.log" Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.208169 4793 generic.go:334] "Generic (PLEG): container finished" podID="083642d1-121a-44c0-952e-63b1fed1fe9e" containerID="d8d3f8fa90a220741de1559bb00254f87809d03bdf45ccdd6a4a8d6403d9e152" exitCode=255 Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.208208 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" event={"ID":"083642d1-121a-44c0-952e-63b1fed1fe9e","Type":"ContainerDied","Data":"d8d3f8fa90a220741de1559bb00254f87809d03bdf45ccdd6a4a8d6403d9e152"} Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.208869 4793 scope.go:117] "RemoveContainer" containerID="d8d3f8fa90a220741de1559bb00254f87809d03bdf45ccdd6a4a8d6403d9e152" Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.228410 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.247285 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.332962 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.347855 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.361820 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.526870 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.667249 4793 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.667526 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://a48b2acf599910678953475595199d8907e6ea5f0eaf9dd9efe4650149da85ee" gracePeriod=5 Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.709240 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.724887 4793 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.821115 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.856899 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 27 20:07:41 crc kubenswrapper[4793]: I0127 20:07:41.897379 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 27 20:07:42 crc kubenswrapper[4793]: I0127 20:07:42.053280 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 27 20:07:42 crc kubenswrapper[4793]: I0127 20:07:42.142993 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 27 20:07:42 crc kubenswrapper[4793]: I0127 20:07:42.197969 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 27 20:07:42 crc kubenswrapper[4793]: I0127 20:07:42.216421 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-6cbd7f6dbc-whk65_083642d1-121a-44c0-952e-63b1fed1fe9e/oauth-openshift/0.log" Jan 27 20:07:42 crc kubenswrapper[4793]: I0127 20:07:42.216505 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" event={"ID":"083642d1-121a-44c0-952e-63b1fed1fe9e","Type":"ContainerStarted","Data":"918e6ba231b1ae875149ad2aa6f9c0071b11aeee2d671dde39ca395fe4e7a0d7"} Jan 27 20:07:42 crc kubenswrapper[4793]: I0127 20:07:42.217094 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:42 crc kubenswrapper[4793]: I0127 20:07:42.222306 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6cbd7f6dbc-whk65" Jan 27 20:07:42 crc kubenswrapper[4793]: I0127 20:07:42.237017 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 27 20:07:42 crc kubenswrapper[4793]: I0127 20:07:42.482695 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 27 20:07:42 crc kubenswrapper[4793]: I0127 20:07:42.485109 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 27 20:07:42 crc kubenswrapper[4793]: I0127 20:07:42.521635 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 27 20:07:42 crc kubenswrapper[4793]: I0127 20:07:42.632176 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 27 20:07:42 crc kubenswrapper[4793]: I0127 20:07:42.938517 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 27 20:07:42 crc kubenswrapper[4793]: I0127 20:07:42.961928 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 27 20:07:43 crc kubenswrapper[4793]: I0127 20:07:43.200626 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 27 20:07:43 
crc kubenswrapper[4793]: I0127 20:07:43.219307 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 27 20:07:43 crc kubenswrapper[4793]: I0127 20:07:43.248520 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 27 20:07:43 crc kubenswrapper[4793]: I0127 20:07:43.290643 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 27 20:07:43 crc kubenswrapper[4793]: I0127 20:07:43.337697 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 27 20:07:43 crc kubenswrapper[4793]: I0127 20:07:43.387425 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 27 20:07:43 crc kubenswrapper[4793]: I0127 20:07:43.388776 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 27 20:07:43 crc kubenswrapper[4793]: I0127 20:07:43.655111 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 27 20:07:43 crc kubenswrapper[4793]: I0127 20:07:43.674850 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 27 20:07:43 crc kubenswrapper[4793]: I0127 20:07:43.722156 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 27 20:07:43 crc kubenswrapper[4793]: I0127 20:07:43.756865 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 27 20:07:43 crc kubenswrapper[4793]: I0127 20:07:43.823965 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 27 20:07:43 crc kubenswrapper[4793]: I0127 20:07:43.825998 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 27 20:07:43 crc kubenswrapper[4793]: I0127 20:07:43.927144 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 27 20:07:43 crc kubenswrapper[4793]: I0127 20:07:43.965339 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 27 20:07:44 crc kubenswrapper[4793]: I0127 20:07:44.116145 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 27 20:07:44 crc kubenswrapper[4793]: I0127 20:07:44.204961 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 27 20:07:44 crc kubenswrapper[4793]: I0127 20:07:44.258819 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 27 20:07:44 crc kubenswrapper[4793]: I0127 20:07:44.346153 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 27 20:07:44 crc kubenswrapper[4793]: I0127 20:07:44.390531 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 27 20:07:44 
crc kubenswrapper[4793]: I0127 20:07:44.410930 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 27 20:07:44 crc kubenswrapper[4793]: I0127 20:07:44.438460 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 27 20:07:44 crc kubenswrapper[4793]: I0127 20:07:44.588335 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 27 20:07:44 crc kubenswrapper[4793]: I0127 20:07:44.641146 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 27 20:07:44 crc kubenswrapper[4793]: I0127 20:07:44.724301 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 27 20:07:45 crc kubenswrapper[4793]: I0127 20:07:45.264300 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 27 20:07:45 crc kubenswrapper[4793]: I0127 20:07:45.321824 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 27 20:07:45 crc kubenswrapper[4793]: I0127 20:07:45.346850 4793 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 27 20:07:45 crc kubenswrapper[4793]: I0127 20:07:45.354702 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 27 20:07:45 crc kubenswrapper[4793]: I0127 20:07:45.354841 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 27 20:07:46 crc kubenswrapper[4793]: I0127 20:07:46.494326 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.246783 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.247115 4793 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="a48b2acf599910678953475595199d8907e6ea5f0eaf9dd9efe4650149da85ee" exitCode=137 Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.247173 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd80168af1e7465f6d8c17f1a65dbd4eb10ab7b37b37d7c57f52c9e1bb593fcf" Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.253189 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.253258 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.281863 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.281943 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.281984 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.282050 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.282074 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.282394 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.282438 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.282475 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.282664 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.291270 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.383277 4793 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.383320 4793 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.383334 4793 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.383347 4793 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.383358 4793 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:47 crc kubenswrapper[4793]: I0127 20:07:47.810100 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 27 20:07:48 crc kubenswrapper[4793]: I0127 20:07:48.257604 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 20:07:49 crc kubenswrapper[4793]: I0127 20:07:49.254054 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"] Jan 27 20:07:49 crc kubenswrapper[4793]: I0127 20:07:49.254618 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" containerName="controller-manager" containerID="cri-o://2e0a2c3fcdd3ec0b4ff24b0f92b789f9d01e78188166e09f5a18f89206947e37" gracePeriod=30 Jan 27 20:07:49 crc kubenswrapper[4793]: I0127 20:07:49.351535 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"] Jan 27 20:07:49 crc kubenswrapper[4793]: I0127 20:07:49.351798 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r" podUID="f14a7294-c538-4d39-abf5-b286d7f08659" containerName="route-controller-manager" containerID="cri-o://dce985d5f9bfd8c6b26001d1ceeb60e3f35b775483b9709ab3728e27963c8174" gracePeriod=30 Jan 27 20:07:49 crc kubenswrapper[4793]: E0127 20:07:49.397240 4793 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod84a73fd9_b3ac_4f60_8577_46efd2bd5af2.slice/crio-conmon-2e0a2c3fcdd3ec0b4ff24b0f92b789f9d01e78188166e09f5a18f89206947e37.scope\": RecentStats: unable to find data in memory cache]" Jan 27 20:07:49 crc kubenswrapper[4793]: I0127 20:07:49.749955 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-controller-manager_controller-manager-6c84bb8bf6-rnx9g_84a73fd9-b3ac-4f60-8577-46efd2bd5af2/controller-manager/0.log" Jan 27 20:07:49 crc kubenswrapper[4793]: I0127 20:07:49.750375 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" Jan 27 20:07:49 crc kubenswrapper[4793]: I0127 20:07:49.914591 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-config\") pod \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " Jan 27 20:07:49 crc kubenswrapper[4793]: I0127 20:07:49.914693 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-client-ca\") pod \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " Jan 27 20:07:49 crc kubenswrapper[4793]: I0127 20:07:49.914747 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-serving-cert\") pod \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " Jan 27 20:07:49 crc kubenswrapper[4793]: I0127 20:07:49.914778 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-proxy-ca-bundles\") pod \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " Jan 27 20:07:49 crc kubenswrapper[4793]: I0127 20:07:49.914885 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mx9tn\" (UniqueName: \"kubernetes.io/projected/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-kube-api-access-mx9tn\") pod \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\" (UID: \"84a73fd9-b3ac-4f60-8577-46efd2bd5af2\") " Jan 27 20:07:49 crc kubenswrapper[4793]: I0127 20:07:49.915353 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-client-ca" (OuterVolumeSpecName: "client-ca") pod "84a73fd9-b3ac-4f60-8577-46efd2bd5af2" (UID: "84a73fd9-b3ac-4f60-8577-46efd2bd5af2"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:07:49 crc kubenswrapper[4793]: I0127 20:07:49.915497 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-config" (OuterVolumeSpecName: "config") pod "84a73fd9-b3ac-4f60-8577-46efd2bd5af2" (UID: "84a73fd9-b3ac-4f60-8577-46efd2bd5af2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:07:49 crc kubenswrapper[4793]: I0127 20:07:49.916195 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "84a73fd9-b3ac-4f60-8577-46efd2bd5af2" (UID: "84a73fd9-b3ac-4f60-8577-46efd2bd5af2"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:07:49 crc kubenswrapper[4793]: I0127 20:07:49.920323 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "84a73fd9-b3ac-4f60-8577-46efd2bd5af2" (UID: "84a73fd9-b3ac-4f60-8577-46efd2bd5af2"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:07:49 crc kubenswrapper[4793]: I0127 20:07:49.921931 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-kube-api-access-mx9tn" (OuterVolumeSpecName: "kube-api-access-mx9tn") pod "84a73fd9-b3ac-4f60-8577-46efd2bd5af2" (UID: "84a73fd9-b3ac-4f60-8577-46efd2bd5af2"). InnerVolumeSpecName "kube-api-access-mx9tn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.016316 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mx9tn\" (UniqueName: \"kubernetes.io/projected/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-kube-api-access-mx9tn\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.016383 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.016398 4793 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.016410 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.016424 4793 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/84a73fd9-b3ac-4f60-8577-46efd2bd5af2-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.227932 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.280663 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-controller-manager_controller-manager-6c84bb8bf6-rnx9g_84a73fd9-b3ac-4f60-8577-46efd2bd5af2/controller-manager/0.log" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.280759 4793 generic.go:334] "Generic (PLEG): container finished" podID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" containerID="2e0a2c3fcdd3ec0b4ff24b0f92b789f9d01e78188166e09f5a18f89206947e37" exitCode=0 Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.280831 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.280874 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" event={"ID":"84a73fd9-b3ac-4f60-8577-46efd2bd5af2","Type":"ContainerDied","Data":"2e0a2c3fcdd3ec0b4ff24b0f92b789f9d01e78188166e09f5a18f89206947e37"} Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.280906 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g" event={"ID":"84a73fd9-b3ac-4f60-8577-46efd2bd5af2","Type":"ContainerDied","Data":"357ba6d3067ad603003a59ade0266e244fd9a32dc089be9b623d56866597b397"} Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.280928 4793 scope.go:117] "RemoveContainer" containerID="2e0a2c3fcdd3ec0b4ff24b0f92b789f9d01e78188166e09f5a18f89206947e37" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.282444 4793 generic.go:334] "Generic (PLEG): container finished" podID="f14a7294-c538-4d39-abf5-b286d7f08659" containerID="dce985d5f9bfd8c6b26001d1ceeb60e3f35b775483b9709ab3728e27963c8174" exitCode=0 Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.282485 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r" event={"ID":"f14a7294-c538-4d39-abf5-b286d7f08659","Type":"ContainerDied","Data":"dce985d5f9bfd8c6b26001d1ceeb60e3f35b775483b9709ab3728e27963c8174"} Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.282515 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r" event={"ID":"f14a7294-c538-4d39-abf5-b286d7f08659","Type":"ContainerDied","Data":"ee416f8b36cb58bcd200b0079380dff6837b9023dce93cab466a975747492789"} Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.283030 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.308402 4793 scope.go:117] "RemoveContainer" containerID="89b7cfc50e72b20fdd68fb7f4b708fa9a87f228ebf89afb3695518a022a8de24" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.309841 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"] Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.313928 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6c84bb8bf6-rnx9g"] Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.320166 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f14a7294-c538-4d39-abf5-b286d7f08659-config\") pod \"f14a7294-c538-4d39-abf5-b286d7f08659\" (UID: \"f14a7294-c538-4d39-abf5-b286d7f08659\") " Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.320202 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zl88l\" (UniqueName: \"kubernetes.io/projected/f14a7294-c538-4d39-abf5-b286d7f08659-kube-api-access-zl88l\") pod \"f14a7294-c538-4d39-abf5-b286d7f08659\" (UID: \"f14a7294-c538-4d39-abf5-b286d7f08659\") " Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.320224 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f14a7294-c538-4d39-abf5-b286d7f08659-serving-cert\") pod \"f14a7294-c538-4d39-abf5-b286d7f08659\" (UID: \"f14a7294-c538-4d39-abf5-b286d7f08659\") " Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.320264 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f14a7294-c538-4d39-abf5-b286d7f08659-client-ca\") pod \"f14a7294-c538-4d39-abf5-b286d7f08659\" (UID: \"f14a7294-c538-4d39-abf5-b286d7f08659\") " Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.320996 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f14a7294-c538-4d39-abf5-b286d7f08659-client-ca" (OuterVolumeSpecName: "client-ca") pod "f14a7294-c538-4d39-abf5-b286d7f08659" (UID: "f14a7294-c538-4d39-abf5-b286d7f08659"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.321240 4793 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f14a7294-c538-4d39-abf5-b286d7f08659-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.321384 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f14a7294-c538-4d39-abf5-b286d7f08659-config" (OuterVolumeSpecName: "config") pod "f14a7294-c538-4d39-abf5-b286d7f08659" (UID: "f14a7294-c538-4d39-abf5-b286d7f08659"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.323533 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f14a7294-c538-4d39-abf5-b286d7f08659-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f14a7294-c538-4d39-abf5-b286d7f08659" (UID: "f14a7294-c538-4d39-abf5-b286d7f08659"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.324021 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f14a7294-c538-4d39-abf5-b286d7f08659-kube-api-access-zl88l" (OuterVolumeSpecName: "kube-api-access-zl88l") pod "f14a7294-c538-4d39-abf5-b286d7f08659" (UID: "f14a7294-c538-4d39-abf5-b286d7f08659"). InnerVolumeSpecName "kube-api-access-zl88l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.328856 4793 scope.go:117] "RemoveContainer" containerID="2e0a2c3fcdd3ec0b4ff24b0f92b789f9d01e78188166e09f5a18f89206947e37" Jan 27 20:07:50 crc kubenswrapper[4793]: E0127 20:07:50.329233 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e0a2c3fcdd3ec0b4ff24b0f92b789f9d01e78188166e09f5a18f89206947e37\": container with ID starting with 2e0a2c3fcdd3ec0b4ff24b0f92b789f9d01e78188166e09f5a18f89206947e37 not found: ID does not exist" containerID="2e0a2c3fcdd3ec0b4ff24b0f92b789f9d01e78188166e09f5a18f89206947e37" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.329280 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e0a2c3fcdd3ec0b4ff24b0f92b789f9d01e78188166e09f5a18f89206947e37"} err="failed to get container status \"2e0a2c3fcdd3ec0b4ff24b0f92b789f9d01e78188166e09f5a18f89206947e37\": rpc error: code = NotFound desc = could not find container \"2e0a2c3fcdd3ec0b4ff24b0f92b789f9d01e78188166e09f5a18f89206947e37\": container with ID starting with 2e0a2c3fcdd3ec0b4ff24b0f92b789f9d01e78188166e09f5a18f89206947e37 not found: ID does not exist" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.329312 4793 scope.go:117] "RemoveContainer" containerID="89b7cfc50e72b20fdd68fb7f4b708fa9a87f228ebf89afb3695518a022a8de24" Jan 27 20:07:50 crc kubenswrapper[4793]: E0127 20:07:50.329702 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89b7cfc50e72b20fdd68fb7f4b708fa9a87f228ebf89afb3695518a022a8de24\": container with ID starting with 89b7cfc50e72b20fdd68fb7f4b708fa9a87f228ebf89afb3695518a022a8de24 not found: ID does not exist" containerID="89b7cfc50e72b20fdd68fb7f4b708fa9a87f228ebf89afb3695518a022a8de24" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.329735 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89b7cfc50e72b20fdd68fb7f4b708fa9a87f228ebf89afb3695518a022a8de24"} err="failed to get container status \"89b7cfc50e72b20fdd68fb7f4b708fa9a87f228ebf89afb3695518a022a8de24\": rpc error: code = NotFound desc = could not find container \"89b7cfc50e72b20fdd68fb7f4b708fa9a87f228ebf89afb3695518a022a8de24\": container with ID starting with 89b7cfc50e72b20fdd68fb7f4b708fa9a87f228ebf89afb3695518a022a8de24 not found: ID does not exist" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.329761 4793 scope.go:117] "RemoveContainer" containerID="dce985d5f9bfd8c6b26001d1ceeb60e3f35b775483b9709ab3728e27963c8174" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.342738 4793 scope.go:117] "RemoveContainer" containerID="dce985d5f9bfd8c6b26001d1ceeb60e3f35b775483b9709ab3728e27963c8174" Jan 27 20:07:50 crc kubenswrapper[4793]: E0127 20:07:50.343267 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"dce985d5f9bfd8c6b26001d1ceeb60e3f35b775483b9709ab3728e27963c8174\": container with ID starting with dce985d5f9bfd8c6b26001d1ceeb60e3f35b775483b9709ab3728e27963c8174 not found: ID does not exist" containerID="dce985d5f9bfd8c6b26001d1ceeb60e3f35b775483b9709ab3728e27963c8174" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.343317 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dce985d5f9bfd8c6b26001d1ceeb60e3f35b775483b9709ab3728e27963c8174"} err="failed to get container status \"dce985d5f9bfd8c6b26001d1ceeb60e3f35b775483b9709ab3728e27963c8174\": rpc error: code = NotFound desc = could not find container \"dce985d5f9bfd8c6b26001d1ceeb60e3f35b775483b9709ab3728e27963c8174\": container with ID starting with dce985d5f9bfd8c6b26001d1ceeb60e3f35b775483b9709ab3728e27963c8174 not found: ID does not exist" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.421945 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f14a7294-c538-4d39-abf5-b286d7f08659-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.421993 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zl88l\" (UniqueName: \"kubernetes.io/projected/f14a7294-c538-4d39-abf5-b286d7f08659-kube-api-access-zl88l\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.422007 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f14a7294-c538-4d39-abf5-b286d7f08659-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.503689 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf"] Jan 27 20:07:50 crc kubenswrapper[4793]: E0127 20:07:50.503964 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" containerName="controller-manager" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.503989 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" containerName="controller-manager" Jan 27 20:07:50 crc kubenswrapper[4793]: E0127 20:07:50.504019 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14a7294-c538-4d39-abf5-b286d7f08659" containerName="route-controller-manager" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.504028 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14a7294-c538-4d39-abf5-b286d7f08659" containerName="route-controller-manager" Jan 27 20:07:50 crc kubenswrapper[4793]: E0127 20:07:50.504053 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.504061 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 27 20:07:50 crc kubenswrapper[4793]: E0127 20:07:50.504069 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" containerName="controller-manager" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.504077 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" containerName="controller-manager" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.504204 4793 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="f14a7294-c538-4d39-abf5-b286d7f08659" containerName="route-controller-manager" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.504222 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.504232 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" containerName="controller-manager" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.504243 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" containerName="controller-manager" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.504678 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.507377 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.508027 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.508165 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.508464 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.508944 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.509716 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr"] Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.510318 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.510814 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.514429 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf"] Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.528983 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.548537 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr"] Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.609706 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"] Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.612937 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b4d8cc5bd-c7h8r"] Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.631426 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-client-ca\") pod \"controller-manager-68ff5ff7df-fm8sf\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.631474 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7grng\" (UniqueName: \"kubernetes.io/projected/6038e030-f224-4b63-a422-c155fa725da3-kube-api-access-7grng\") pod \"route-controller-manager-ccbd48f84-rjxdr\" (UID: \"6038e030-f224-4b63-a422-c155fa725da3\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.631497 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwc65\" (UniqueName: \"kubernetes.io/projected/9d6bdd15-831b-4691-8bd2-9970008c9f11-kube-api-access-cwc65\") pod \"controller-manager-68ff5ff7df-fm8sf\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.631523 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6038e030-f224-4b63-a422-c155fa725da3-config\") pod \"route-controller-manager-ccbd48f84-rjxdr\" (UID: \"6038e030-f224-4b63-a422-c155fa725da3\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.631580 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6038e030-f224-4b63-a422-c155fa725da3-serving-cert\") pod \"route-controller-manager-ccbd48f84-rjxdr\" (UID: \"6038e030-f224-4b63-a422-c155fa725da3\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.631609 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/6038e030-f224-4b63-a422-c155fa725da3-client-ca\") pod \"route-controller-manager-ccbd48f84-rjxdr\" (UID: \"6038e030-f224-4b63-a422-c155fa725da3\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.631642 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-config\") pod \"controller-manager-68ff5ff7df-fm8sf\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.631659 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-proxy-ca-bundles\") pod \"controller-manager-68ff5ff7df-fm8sf\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.631689 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d6bdd15-831b-4691-8bd2-9970008c9f11-serving-cert\") pod \"controller-manager-68ff5ff7df-fm8sf\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.732819 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-config\") pod \"controller-manager-68ff5ff7df-fm8sf\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.732872 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-proxy-ca-bundles\") pod \"controller-manager-68ff5ff7df-fm8sf\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.732909 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d6bdd15-831b-4691-8bd2-9970008c9f11-serving-cert\") pod \"controller-manager-68ff5ff7df-fm8sf\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.732935 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-client-ca\") pod \"controller-manager-68ff5ff7df-fm8sf\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.732964 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7grng\" (UniqueName: \"kubernetes.io/projected/6038e030-f224-4b63-a422-c155fa725da3-kube-api-access-7grng\") pod \"route-controller-manager-ccbd48f84-rjxdr\" (UID: 
\"6038e030-f224-4b63-a422-c155fa725da3\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.732985 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwc65\" (UniqueName: \"kubernetes.io/projected/9d6bdd15-831b-4691-8bd2-9970008c9f11-kube-api-access-cwc65\") pod \"controller-manager-68ff5ff7df-fm8sf\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.733015 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6038e030-f224-4b63-a422-c155fa725da3-config\") pod \"route-controller-manager-ccbd48f84-rjxdr\" (UID: \"6038e030-f224-4b63-a422-c155fa725da3\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.733051 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6038e030-f224-4b63-a422-c155fa725da3-serving-cert\") pod \"route-controller-manager-ccbd48f84-rjxdr\" (UID: \"6038e030-f224-4b63-a422-c155fa725da3\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.733085 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6038e030-f224-4b63-a422-c155fa725da3-client-ca\") pod \"route-controller-manager-ccbd48f84-rjxdr\" (UID: \"6038e030-f224-4b63-a422-c155fa725da3\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.734083 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6038e030-f224-4b63-a422-c155fa725da3-client-ca\") pod \"route-controller-manager-ccbd48f84-rjxdr\" (UID: \"6038e030-f224-4b63-a422-c155fa725da3\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.734337 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-config\") pod \"controller-manager-68ff5ff7df-fm8sf\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.734461 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-proxy-ca-bundles\") pod \"controller-manager-68ff5ff7df-fm8sf\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.735442 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6038e030-f224-4b63-a422-c155fa725da3-config\") pod \"route-controller-manager-ccbd48f84-rjxdr\" (UID: \"6038e030-f224-4b63-a422-c155fa725da3\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 
20:07:50.737063 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-client-ca\") pod \"controller-manager-68ff5ff7df-fm8sf\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.738785 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6038e030-f224-4b63-a422-c155fa725da3-serving-cert\") pod \"route-controller-manager-ccbd48f84-rjxdr\" (UID: \"6038e030-f224-4b63-a422-c155fa725da3\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.739305 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d6bdd15-831b-4691-8bd2-9970008c9f11-serving-cert\") pod \"controller-manager-68ff5ff7df-fm8sf\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.751696 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7grng\" (UniqueName: \"kubernetes.io/projected/6038e030-f224-4b63-a422-c155fa725da3-kube-api-access-7grng\") pod \"route-controller-manager-ccbd48f84-rjxdr\" (UID: \"6038e030-f224-4b63-a422-c155fa725da3\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.754129 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwc65\" (UniqueName: \"kubernetes.io/projected/9d6bdd15-831b-4691-8bd2-9970008c9f11-kube-api-access-cwc65\") pod \"controller-manager-68ff5ff7df-fm8sf\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.849206 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:50 crc kubenswrapper[4793]: I0127 20:07:50.856644 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:07:51 crc kubenswrapper[4793]: I0127 20:07:51.080573 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr"] Jan 27 20:07:51 crc kubenswrapper[4793]: I0127 20:07:51.120921 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf"] Jan 27 20:07:51 crc kubenswrapper[4793]: W0127 20:07:51.124734 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d6bdd15_831b_4691_8bd2_9970008c9f11.slice/crio-7fbd2e3e4379f21b16186a6f0aee2057ed823e4e5ae6653060f687492a3661f2 WatchSource:0}: Error finding container 7fbd2e3e4379f21b16186a6f0aee2057ed823e4e5ae6653060f687492a3661f2: Status 404 returned error can't find the container with id 7fbd2e3e4379f21b16186a6f0aee2057ed823e4e5ae6653060f687492a3661f2 Jan 27 20:07:51 crc kubenswrapper[4793]: I0127 20:07:51.298408 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" event={"ID":"6038e030-f224-4b63-a422-c155fa725da3","Type":"ContainerStarted","Data":"a78be08fb97d37e2483bf6d6e3bf598470ca922b9af9dfba7214ca6bdaadedbb"} Jan 27 20:07:51 crc kubenswrapper[4793]: I0127 20:07:51.299823 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:07:51 crc kubenswrapper[4793]: I0127 20:07:51.299916 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" event={"ID":"6038e030-f224-4b63-a422-c155fa725da3","Type":"ContainerStarted","Data":"baed3f7cced948974ef7f90a6fd1ac7f1c146137ef7a3653ffe8135705f86c64"} Jan 27 20:07:51 crc kubenswrapper[4793]: I0127 20:07:51.301326 4793 patch_prober.go:28] interesting pod/route-controller-manager-ccbd48f84-rjxdr container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.64:8443/healthz\": dial tcp 10.217.0.64:8443: connect: connection refused" start-of-body= Jan 27 20:07:51 crc kubenswrapper[4793]: I0127 20:07:51.301674 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" podUID="6038e030-f224-4b63-a422-c155fa725da3" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.64:8443/healthz\": dial tcp 10.217.0.64:8443: connect: connection refused" Jan 27 20:07:51 crc kubenswrapper[4793]: I0127 20:07:51.302009 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" event={"ID":"9d6bdd15-831b-4691-8bd2-9970008c9f11","Type":"ContainerStarted","Data":"97d24f032e191520617a2ccb7bfaab6d445d03b780a291b79a91eaabbfea57a5"} Jan 27 20:07:51 crc kubenswrapper[4793]: I0127 20:07:51.302062 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" event={"ID":"9d6bdd15-831b-4691-8bd2-9970008c9f11","Type":"ContainerStarted","Data":"7fbd2e3e4379f21b16186a6f0aee2057ed823e4e5ae6653060f687492a3661f2"} Jan 27 20:07:51 crc kubenswrapper[4793]: I0127 20:07:51.302728 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:51 crc kubenswrapper[4793]: I0127 20:07:51.305497 4793 patch_prober.go:28] interesting pod/controller-manager-68ff5ff7df-fm8sf container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.63:8443/healthz\": dial tcp 10.217.0.63:8443: connect: connection refused" start-of-body= Jan 27 20:07:51 crc kubenswrapper[4793]: I0127 20:07:51.305573 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" podUID="9d6bdd15-831b-4691-8bd2-9970008c9f11" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.63:8443/healthz\": dial tcp 10.217.0.63:8443: connect: connection refused" Jan 27 20:07:51 crc kubenswrapper[4793]: I0127 20:07:51.319719 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" podStartSLOduration=2.319699452 podStartE2EDuration="2.319699452s" podCreationTimestamp="2026-01-27 20:07:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:07:51.316672534 +0000 UTC m=+296.706925700" watchObservedRunningTime="2026-01-27 20:07:51.319699452 +0000 UTC m=+296.709952608" Jan 27 20:07:51 crc kubenswrapper[4793]: I0127 20:07:51.335480 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" podStartSLOduration=2.335456009 podStartE2EDuration="2.335456009s" podCreationTimestamp="2026-01-27 20:07:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:07:51.331800287 +0000 UTC m=+296.722053443" watchObservedRunningTime="2026-01-27 20:07:51.335456009 +0000 UTC m=+296.725709176" Jan 27 20:07:51 crc kubenswrapper[4793]: I0127 20:07:51.810197 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84a73fd9-b3ac-4f60-8577-46efd2bd5af2" path="/var/lib/kubelet/pods/84a73fd9-b3ac-4f60-8577-46efd2bd5af2/volumes" Jan 27 20:07:51 crc kubenswrapper[4793]: I0127 20:07:51.810996 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f14a7294-c538-4d39-abf5-b286d7f08659" path="/var/lib/kubelet/pods/f14a7294-c538-4d39-abf5-b286d7f08659/volumes" Jan 27 20:07:52 crc kubenswrapper[4793]: I0127 20:07:52.314661 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:07:52 crc kubenswrapper[4793]: I0127 20:07:52.314826 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:07:55 crc kubenswrapper[4793]: I0127 20:07:55.556952 4793 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 27 20:07:59 crc kubenswrapper[4793]: I0127 20:07:59.350029 4793 generic.go:334] "Generic (PLEG): container finished" podID="1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" containerID="4471df472af763cae8ddf7904d3cba15132e1f1e747ad9bf69f21eb469057097" exitCode=0 Jan 27 20:07:59 crc kubenswrapper[4793]: I0127 20:07:59.350573 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" event={"ID":"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187","Type":"ContainerDied","Data":"4471df472af763cae8ddf7904d3cba15132e1f1e747ad9bf69f21eb469057097"} Jan 27 20:07:59 crc kubenswrapper[4793]: I0127 20:07:59.350955 4793 scope.go:117] "RemoveContainer" containerID="4471df472af763cae8ddf7904d3cba15132e1f1e747ad9bf69f21eb469057097" Jan 27 20:08:00 crc kubenswrapper[4793]: I0127 20:08:00.357067 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" event={"ID":"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187","Type":"ContainerStarted","Data":"fc7a8978ee915d5acdf909d7b613d8479e4927eb34158dffea8b0e0e84f249c4"} Jan 27 20:08:00 crc kubenswrapper[4793]: I0127 20:08:00.358312 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" Jan 27 20:08:00 crc kubenswrapper[4793]: I0127 20:08:00.359321 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.244490 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf"] Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.246404 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" podUID="9d6bdd15-831b-4691-8bd2-9970008c9f11" containerName="controller-manager" containerID="cri-o://97d24f032e191520617a2ccb7bfaab6d445d03b780a291b79a91eaabbfea57a5" gracePeriod=30 Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.280638 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr"] Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.280875 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" podUID="6038e030-f224-4b63-a422-c155fa725da3" containerName="route-controller-manager" containerID="cri-o://a78be08fb97d37e2483bf6d6e3bf598470ca922b9af9dfba7214ca6bdaadedbb" gracePeriod=30 Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.422274 4793 generic.go:334] "Generic (PLEG): container finished" podID="6038e030-f224-4b63-a422-c155fa725da3" containerID="a78be08fb97d37e2483bf6d6e3bf598470ca922b9af9dfba7214ca6bdaadedbb" exitCode=0 Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.422361 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" event={"ID":"6038e030-f224-4b63-a422-c155fa725da3","Type":"ContainerDied","Data":"a78be08fb97d37e2483bf6d6e3bf598470ca922b9af9dfba7214ca6bdaadedbb"} Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.426973 4793 generic.go:334] "Generic (PLEG): container finished" podID="9d6bdd15-831b-4691-8bd2-9970008c9f11" containerID="97d24f032e191520617a2ccb7bfaab6d445d03b780a291b79a91eaabbfea57a5" exitCode=0 Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.427046 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" event={"ID":"9d6bdd15-831b-4691-8bd2-9970008c9f11","Type":"ContainerDied","Data":"97d24f032e191520617a2ccb7bfaab6d445d03b780a291b79a91eaabbfea57a5"} Jan 27 20:08:09 crc 
kubenswrapper[4793]: I0127 20:08:09.647442 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.652196 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.757120 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6038e030-f224-4b63-a422-c155fa725da3-client-ca\") pod \"6038e030-f224-4b63-a422-c155fa725da3\" (UID: \"6038e030-f224-4b63-a422-c155fa725da3\") " Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.757234 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d6bdd15-831b-4691-8bd2-9970008c9f11-serving-cert\") pod \"9d6bdd15-831b-4691-8bd2-9970008c9f11\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.757260 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-proxy-ca-bundles\") pod \"9d6bdd15-831b-4691-8bd2-9970008c9f11\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.757279 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6038e030-f224-4b63-a422-c155fa725da3-config\") pod \"6038e030-f224-4b63-a422-c155fa725da3\" (UID: \"6038e030-f224-4b63-a422-c155fa725da3\") " Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.757300 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwc65\" (UniqueName: \"kubernetes.io/projected/9d6bdd15-831b-4691-8bd2-9970008c9f11-kube-api-access-cwc65\") pod \"9d6bdd15-831b-4691-8bd2-9970008c9f11\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.757318 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-client-ca\") pod \"9d6bdd15-831b-4691-8bd2-9970008c9f11\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.757352 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7grng\" (UniqueName: \"kubernetes.io/projected/6038e030-f224-4b63-a422-c155fa725da3-kube-api-access-7grng\") pod \"6038e030-f224-4b63-a422-c155fa725da3\" (UID: \"6038e030-f224-4b63-a422-c155fa725da3\") " Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.757387 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-config\") pod \"9d6bdd15-831b-4691-8bd2-9970008c9f11\" (UID: \"9d6bdd15-831b-4691-8bd2-9970008c9f11\") " Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.757402 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6038e030-f224-4b63-a422-c155fa725da3-serving-cert\") pod \"6038e030-f224-4b63-a422-c155fa725da3\" (UID: 
\"6038e030-f224-4b63-a422-c155fa725da3\") " Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.758355 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6038e030-f224-4b63-a422-c155fa725da3-client-ca" (OuterVolumeSpecName: "client-ca") pod "6038e030-f224-4b63-a422-c155fa725da3" (UID: "6038e030-f224-4b63-a422-c155fa725da3"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.758372 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6038e030-f224-4b63-a422-c155fa725da3-config" (OuterVolumeSpecName: "config") pod "6038e030-f224-4b63-a422-c155fa725da3" (UID: "6038e030-f224-4b63-a422-c155fa725da3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.759308 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "9d6bdd15-831b-4691-8bd2-9970008c9f11" (UID: "9d6bdd15-831b-4691-8bd2-9970008c9f11"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.759722 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-client-ca" (OuterVolumeSpecName: "client-ca") pod "9d6bdd15-831b-4691-8bd2-9970008c9f11" (UID: "9d6bdd15-831b-4691-8bd2-9970008c9f11"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.759841 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-config" (OuterVolumeSpecName: "config") pod "9d6bdd15-831b-4691-8bd2-9970008c9f11" (UID: "9d6bdd15-831b-4691-8bd2-9970008c9f11"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.763720 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d6bdd15-831b-4691-8bd2-9970008c9f11-kube-api-access-cwc65" (OuterVolumeSpecName: "kube-api-access-cwc65") pod "9d6bdd15-831b-4691-8bd2-9970008c9f11" (UID: "9d6bdd15-831b-4691-8bd2-9970008c9f11"). InnerVolumeSpecName "kube-api-access-cwc65". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.763647 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6038e030-f224-4b63-a422-c155fa725da3-kube-api-access-7grng" (OuterVolumeSpecName: "kube-api-access-7grng") pod "6038e030-f224-4b63-a422-c155fa725da3" (UID: "6038e030-f224-4b63-a422-c155fa725da3"). InnerVolumeSpecName "kube-api-access-7grng". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.764638 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d6bdd15-831b-4691-8bd2-9970008c9f11-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d6bdd15-831b-4691-8bd2-9970008c9f11" (UID: "9d6bdd15-831b-4691-8bd2-9970008c9f11"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.764616 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6038e030-f224-4b63-a422-c155fa725da3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6038e030-f224-4b63-a422-c155fa725da3" (UID: "6038e030-f224-4b63-a422-c155fa725da3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.859469 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d6bdd15-831b-4691-8bd2-9970008c9f11-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.859526 4793 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.859573 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6038e030-f224-4b63-a422-c155fa725da3-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.859595 4793 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.859612 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwc65\" (UniqueName: \"kubernetes.io/projected/9d6bdd15-831b-4691-8bd2-9970008c9f11-kube-api-access-cwc65\") on node \"crc\" DevicePath \"\"" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.859630 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7grng\" (UniqueName: \"kubernetes.io/projected/6038e030-f224-4b63-a422-c155fa725da3-kube-api-access-7grng\") on node \"crc\" DevicePath \"\"" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.859646 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d6bdd15-831b-4691-8bd2-9970008c9f11-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.859662 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6038e030-f224-4b63-a422-c155fa725da3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:08:09 crc kubenswrapper[4793]: I0127 20:08:09.859677 4793 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6038e030-f224-4b63-a422-c155fa725da3-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.433873 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.433869 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf" event={"ID":"9d6bdd15-831b-4691-8bd2-9970008c9f11","Type":"ContainerDied","Data":"7fbd2e3e4379f21b16186a6f0aee2057ed823e4e5ae6653060f687492a3661f2"} Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.434057 4793 scope.go:117] "RemoveContainer" containerID="97d24f032e191520617a2ccb7bfaab6d445d03b780a291b79a91eaabbfea57a5" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.436785 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" event={"ID":"6038e030-f224-4b63-a422-c155fa725da3","Type":"ContainerDied","Data":"baed3f7cced948974ef7f90a6fd1ac7f1c146137ef7a3653ffe8135705f86c64"} Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.436890 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.452277 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf"] Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.455092 4793 scope.go:117] "RemoveContainer" containerID="a78be08fb97d37e2483bf6d6e3bf598470ca922b9af9dfba7214ca6bdaadedbb" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.456542 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-68ff5ff7df-fm8sf"] Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.463652 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr"] Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.468130 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ccbd48f84-rjxdr"] Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.516004 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq"] Jan 27 20:08:10 crc kubenswrapper[4793]: E0127 20:08:10.516302 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6038e030-f224-4b63-a422-c155fa725da3" containerName="route-controller-manager" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.516324 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6038e030-f224-4b63-a422-c155fa725da3" containerName="route-controller-manager" Jan 27 20:08:10 crc kubenswrapper[4793]: E0127 20:08:10.516340 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d6bdd15-831b-4691-8bd2-9970008c9f11" containerName="controller-manager" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.516349 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d6bdd15-831b-4691-8bd2-9970008c9f11" containerName="controller-manager" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.516526 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="6038e030-f224-4b63-a422-c155fa725da3" containerName="route-controller-manager" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.516563 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d6bdd15-831b-4691-8bd2-9970008c9f11" 
containerName="controller-manager" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.516996 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.519112 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.519415 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.521280 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.521537 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.522248 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.522450 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.523498 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2"] Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.524392 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.528853 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.529189 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq"] Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.530669 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.530770 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.530953 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.531040 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.531186 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.531287 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.548822 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2"] Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 
20:08:10.671263 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mbh7\" (UniqueName: \"kubernetes.io/projected/29330d1b-bfa4-40ed-8423-78377d21b43c-kube-api-access-8mbh7\") pod \"route-controller-manager-85776c4794-qtxn2\" (UID: \"29330d1b-bfa4-40ed-8423-78377d21b43c\") " pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.671626 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-client-ca\") pod \"controller-manager-6dfcd5c5b4-797wq\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") " pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.671656 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-proxy-ca-bundles\") pod \"controller-manager-6dfcd5c5b4-797wq\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") " pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.671681 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29330d1b-bfa4-40ed-8423-78377d21b43c-serving-cert\") pod \"route-controller-manager-85776c4794-qtxn2\" (UID: \"29330d1b-bfa4-40ed-8423-78377d21b43c\") " pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.671709 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9efa2d6-648d-4cc8-a1c9-961c041ff618-serving-cert\") pod \"controller-manager-6dfcd5c5b4-797wq\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") " pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.671730 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29330d1b-bfa4-40ed-8423-78377d21b43c-config\") pod \"route-controller-manager-85776c4794-qtxn2\" (UID: \"29330d1b-bfa4-40ed-8423-78377d21b43c\") " pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.671870 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29330d1b-bfa4-40ed-8423-78377d21b43c-client-ca\") pod \"route-controller-manager-85776c4794-qtxn2\" (UID: \"29330d1b-bfa4-40ed-8423-78377d21b43c\") " pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.671971 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-config\") pod \"controller-manager-6dfcd5c5b4-797wq\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") " pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 
20:08:10.672008 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2q6q\" (UniqueName: \"kubernetes.io/projected/f9efa2d6-648d-4cc8-a1c9-961c041ff618-kube-api-access-c2q6q\") pod \"controller-manager-6dfcd5c5b4-797wq\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") " pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.773845 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-proxy-ca-bundles\") pod \"controller-manager-6dfcd5c5b4-797wq\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") " pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.773908 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29330d1b-bfa4-40ed-8423-78377d21b43c-serving-cert\") pod \"route-controller-manager-85776c4794-qtxn2\" (UID: \"29330d1b-bfa4-40ed-8423-78377d21b43c\") " pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.773948 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9efa2d6-648d-4cc8-a1c9-961c041ff618-serving-cert\") pod \"controller-manager-6dfcd5c5b4-797wq\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") " pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.773972 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29330d1b-bfa4-40ed-8423-78377d21b43c-config\") pod \"route-controller-manager-85776c4794-qtxn2\" (UID: \"29330d1b-bfa4-40ed-8423-78377d21b43c\") " pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.774646 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29330d1b-bfa4-40ed-8423-78377d21b43c-client-ca\") pod \"route-controller-manager-85776c4794-qtxn2\" (UID: \"29330d1b-bfa4-40ed-8423-78377d21b43c\") " pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.774698 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-config\") pod \"controller-manager-6dfcd5c5b4-797wq\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") " pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.774725 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2q6q\" (UniqueName: \"kubernetes.io/projected/f9efa2d6-648d-4cc8-a1c9-961c041ff618-kube-api-access-c2q6q\") pod \"controller-manager-6dfcd5c5b4-797wq\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") " pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.774762 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mbh7\" 
(UniqueName: \"kubernetes.io/projected/29330d1b-bfa4-40ed-8423-78377d21b43c-kube-api-access-8mbh7\") pod \"route-controller-manager-85776c4794-qtxn2\" (UID: \"29330d1b-bfa4-40ed-8423-78377d21b43c\") " pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.774791 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-client-ca\") pod \"controller-manager-6dfcd5c5b4-797wq\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") " pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.775431 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-proxy-ca-bundles\") pod \"controller-manager-6dfcd5c5b4-797wq\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") " pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.775723 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-client-ca\") pod \"controller-manager-6dfcd5c5b4-797wq\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") " pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.775779 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29330d1b-bfa4-40ed-8423-78377d21b43c-client-ca\") pod \"route-controller-manager-85776c4794-qtxn2\" (UID: \"29330d1b-bfa4-40ed-8423-78377d21b43c\") " pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.775783 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29330d1b-bfa4-40ed-8423-78377d21b43c-config\") pod \"route-controller-manager-85776c4794-qtxn2\" (UID: \"29330d1b-bfa4-40ed-8423-78377d21b43c\") " pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.776110 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-config\") pod \"controller-manager-6dfcd5c5b4-797wq\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") " pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.779340 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9efa2d6-648d-4cc8-a1c9-961c041ff618-serving-cert\") pod \"controller-manager-6dfcd5c5b4-797wq\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") " pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.780066 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29330d1b-bfa4-40ed-8423-78377d21b43c-serving-cert\") pod \"route-controller-manager-85776c4794-qtxn2\" (UID: \"29330d1b-bfa4-40ed-8423-78377d21b43c\") " 
pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.792420 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mbh7\" (UniqueName: \"kubernetes.io/projected/29330d1b-bfa4-40ed-8423-78377d21b43c-kube-api-access-8mbh7\") pod \"route-controller-manager-85776c4794-qtxn2\" (UID: \"29330d1b-bfa4-40ed-8423-78377d21b43c\") " pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.793596 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2q6q\" (UniqueName: \"kubernetes.io/projected/f9efa2d6-648d-4cc8-a1c9-961c041ff618-kube-api-access-c2q6q\") pod \"controller-manager-6dfcd5c5b4-797wq\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") " pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.853244 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" Jan 27 20:08:10 crc kubenswrapper[4793]: I0127 20:08:10.862831 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" Jan 27 20:08:11 crc kubenswrapper[4793]: I0127 20:08:11.054440 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq"] Jan 27 20:08:11 crc kubenswrapper[4793]: W0127 20:08:11.056304 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf9efa2d6_648d_4cc8_a1c9_961c041ff618.slice/crio-4dc0959bbb45d40d629ed036f089dc3bd883a7091c84d77eaf98cf4f3302adda WatchSource:0}: Error finding container 4dc0959bbb45d40d629ed036f089dc3bd883a7091c84d77eaf98cf4f3302adda: Status 404 returned error can't find the container with id 4dc0959bbb45d40d629ed036f089dc3bd883a7091c84d77eaf98cf4f3302adda Jan 27 20:08:11 crc kubenswrapper[4793]: I0127 20:08:11.289287 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2"] Jan 27 20:08:11 crc kubenswrapper[4793]: W0127 20:08:11.293109 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod29330d1b_bfa4_40ed_8423_78377d21b43c.slice/crio-f226169087ea7afeea936c5c41022c46927eebe30556f388d0e90a691365536e WatchSource:0}: Error finding container f226169087ea7afeea936c5c41022c46927eebe30556f388d0e90a691365536e: Status 404 returned error can't find the container with id f226169087ea7afeea936c5c41022c46927eebe30556f388d0e90a691365536e Jan 27 20:08:11 crc kubenswrapper[4793]: I0127 20:08:11.443394 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" event={"ID":"f9efa2d6-648d-4cc8-a1c9-961c041ff618","Type":"ContainerStarted","Data":"680fc6e984c936b5e89fcb966799f7a8ed4388976278484a32a932bb898ed642"} Jan 27 20:08:11 crc kubenswrapper[4793]: I0127 20:08:11.443442 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" event={"ID":"f9efa2d6-648d-4cc8-a1c9-961c041ff618","Type":"ContainerStarted","Data":"4dc0959bbb45d40d629ed036f089dc3bd883a7091c84d77eaf98cf4f3302adda"} Jan 27 
20:08:11 crc kubenswrapper[4793]: I0127 20:08:11.443814 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq"
Jan 27 20:08:11 crc kubenswrapper[4793]: I0127 20:08:11.444301 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" event={"ID":"29330d1b-bfa4-40ed-8423-78377d21b43c","Type":"ContainerStarted","Data":"f226169087ea7afeea936c5c41022c46927eebe30556f388d0e90a691365536e"}
Jan 27 20:08:11 crc kubenswrapper[4793]: I0127 20:08:11.448465 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq"
Jan 27 20:08:11 crc kubenswrapper[4793]: I0127 20:08:11.461615 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" podStartSLOduration=2.461593947 podStartE2EDuration="2.461593947s" podCreationTimestamp="2026-01-27 20:08:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:08:11.459604532 +0000 UTC m=+316.849857688" watchObservedRunningTime="2026-01-27 20:08:11.461593947 +0000 UTC m=+316.851847123"
Jan 27 20:08:11 crc kubenswrapper[4793]: I0127 20:08:11.810087 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6038e030-f224-4b63-a422-c155fa725da3" path="/var/lib/kubelet/pods/6038e030-f224-4b63-a422-c155fa725da3/volumes"
Jan 27 20:08:11 crc kubenswrapper[4793]: I0127 20:08:11.810863 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d6bdd15-831b-4691-8bd2-9970008c9f11" path="/var/lib/kubelet/pods/9d6bdd15-831b-4691-8bd2-9970008c9f11/volumes"
Jan 27 20:08:12 crc kubenswrapper[4793]: I0127 20:08:12.457407 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" event={"ID":"29330d1b-bfa4-40ed-8423-78377d21b43c","Type":"ContainerStarted","Data":"2c6d0bc873467947b16385da922d00c237d497ab1be365b84f9f9f8fc10215fa"}
Jan 27 20:08:12 crc kubenswrapper[4793]: I0127 20:08:12.477182 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" podStartSLOduration=3.477163861 podStartE2EDuration="3.477163861s" podCreationTimestamp="2026-01-27 20:08:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:08:12.474317836 +0000 UTC m=+317.864571012" watchObservedRunningTime="2026-01-27 20:08:12.477163861 +0000 UTC m=+317.867417017"
Jan 27 20:08:13 crc kubenswrapper[4793]: I0127 20:08:13.463477 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2"
Jan 27 20:08:13 crc kubenswrapper[4793]: I0127 20:08:13.470613 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2"
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.277685 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2"]
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.279308 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" podUID="29330d1b-bfa4-40ed-8423-78377d21b43c" containerName="route-controller-manager" containerID="cri-o://2c6d0bc873467947b16385da922d00c237d497ab1be365b84f9f9f8fc10215fa" gracePeriod=30
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.627359 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2"
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.823376 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mbh7\" (UniqueName: \"kubernetes.io/projected/29330d1b-bfa4-40ed-8423-78377d21b43c-kube-api-access-8mbh7\") pod \"29330d1b-bfa4-40ed-8423-78377d21b43c\" (UID: \"29330d1b-bfa4-40ed-8423-78377d21b43c\") "
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.823422 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29330d1b-bfa4-40ed-8423-78377d21b43c-config\") pod \"29330d1b-bfa4-40ed-8423-78377d21b43c\" (UID: \"29330d1b-bfa4-40ed-8423-78377d21b43c\") "
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.823466 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29330d1b-bfa4-40ed-8423-78377d21b43c-serving-cert\") pod \"29330d1b-bfa4-40ed-8423-78377d21b43c\" (UID: \"29330d1b-bfa4-40ed-8423-78377d21b43c\") "
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.823676 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29330d1b-bfa4-40ed-8423-78377d21b43c-client-ca\") pod \"29330d1b-bfa4-40ed-8423-78377d21b43c\" (UID: \"29330d1b-bfa4-40ed-8423-78377d21b43c\") "
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.824815 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29330d1b-bfa4-40ed-8423-78377d21b43c-client-ca" (OuterVolumeSpecName: "client-ca") pod "29330d1b-bfa4-40ed-8423-78377d21b43c" (UID: "29330d1b-bfa4-40ed-8423-78377d21b43c"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.825027 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29330d1b-bfa4-40ed-8423-78377d21b43c-config" (OuterVolumeSpecName: "config") pod "29330d1b-bfa4-40ed-8423-78377d21b43c" (UID: "29330d1b-bfa4-40ed-8423-78377d21b43c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.829141 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29330d1b-bfa4-40ed-8423-78377d21b43c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "29330d1b-bfa4-40ed-8423-78377d21b43c" (UID: "29330d1b-bfa4-40ed-8423-78377d21b43c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.829215 4793 generic.go:334] "Generic (PLEG): container finished" podID="29330d1b-bfa4-40ed-8423-78377d21b43c" containerID="2c6d0bc873467947b16385da922d00c237d497ab1be365b84f9f9f8fc10215fa" exitCode=0
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.829264 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" event={"ID":"29330d1b-bfa4-40ed-8423-78377d21b43c","Type":"ContainerDied","Data":"2c6d0bc873467947b16385da922d00c237d497ab1be365b84f9f9f8fc10215fa"}
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.829291 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2" event={"ID":"29330d1b-bfa4-40ed-8423-78377d21b43c","Type":"ContainerDied","Data":"f226169087ea7afeea936c5c41022c46927eebe30556f388d0e90a691365536e"}
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.829307 4793 scope.go:117] "RemoveContainer" containerID="2c6d0bc873467947b16385da922d00c237d497ab1be365b84f9f9f8fc10215fa"
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.829268 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2"
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.833772 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29330d1b-bfa4-40ed-8423-78377d21b43c-kube-api-access-8mbh7" (OuterVolumeSpecName: "kube-api-access-8mbh7") pod "29330d1b-bfa4-40ed-8423-78377d21b43c" (UID: "29330d1b-bfa4-40ed-8423-78377d21b43c"). InnerVolumeSpecName "kube-api-access-8mbh7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.868587 4793 scope.go:117] "RemoveContainer" containerID="2c6d0bc873467947b16385da922d00c237d497ab1be365b84f9f9f8fc10215fa"
Jan 27 20:08:49 crc kubenswrapper[4793]: E0127 20:08:49.869060 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c6d0bc873467947b16385da922d00c237d497ab1be365b84f9f9f8fc10215fa\": container with ID starting with 2c6d0bc873467947b16385da922d00c237d497ab1be365b84f9f9f8fc10215fa not found: ID does not exist" containerID="2c6d0bc873467947b16385da922d00c237d497ab1be365b84f9f9f8fc10215fa"
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.869210 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c6d0bc873467947b16385da922d00c237d497ab1be365b84f9f9f8fc10215fa"} err="failed to get container status \"2c6d0bc873467947b16385da922d00c237d497ab1be365b84f9f9f8fc10215fa\": rpc error: code = NotFound desc = could not find container \"2c6d0bc873467947b16385da922d00c237d497ab1be365b84f9f9f8fc10215fa\": container with ID starting with 2c6d0bc873467947b16385da922d00c237d497ab1be365b84f9f9f8fc10215fa not found: ID does not exist"
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.928243 4793 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29330d1b-bfa4-40ed-8423-78377d21b43c-client-ca\") on node \"crc\" DevicePath \"\""
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.928522 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mbh7\" (UniqueName: \"kubernetes.io/projected/29330d1b-bfa4-40ed-8423-78377d21b43c-kube-api-access-8mbh7\") on node \"crc\" DevicePath \"\""
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.928628 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29330d1b-bfa4-40ed-8423-78377d21b43c-config\") on node \"crc\" DevicePath \"\""
Jan 27 20:08:49 crc kubenswrapper[4793]: I0127 20:08:49.928689 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29330d1b-bfa4-40ed-8423-78377d21b43c-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.158658 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2"]
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.166016 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-85776c4794-qtxn2"]
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.590854 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"]
Jan 27 20:08:50 crc kubenswrapper[4793]: E0127 20:08:50.591134 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29330d1b-bfa4-40ed-8423-78377d21b43c" containerName="route-controller-manager"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.591149 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="29330d1b-bfa4-40ed-8423-78377d21b43c" containerName="route-controller-manager"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.591269 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="29330d1b-bfa4-40ed-8423-78377d21b43c" containerName="route-controller-manager"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.591726 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.594496 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.594496 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.594520 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.594597 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.594976 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.595037 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.602665 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"]
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.636355 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/566b97e6-6ab0-415c-80f1-aafec8822b21-config\") pod \"route-controller-manager-ccbd48f84-lrd7d\" (UID: \"566b97e6-6ab0-415c-80f1-aafec8822b21\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.636417 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/566b97e6-6ab0-415c-80f1-aafec8822b21-client-ca\") pod \"route-controller-manager-ccbd48f84-lrd7d\" (UID: \"566b97e6-6ab0-415c-80f1-aafec8822b21\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.636675 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/566b97e6-6ab0-415c-80f1-aafec8822b21-serving-cert\") pod \"route-controller-manager-ccbd48f84-lrd7d\" (UID: \"566b97e6-6ab0-415c-80f1-aafec8822b21\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.636725 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kz6zv\" (UniqueName: \"kubernetes.io/projected/566b97e6-6ab0-415c-80f1-aafec8822b21-kube-api-access-kz6zv\") pod \"route-controller-manager-ccbd48f84-lrd7d\" (UID: \"566b97e6-6ab0-415c-80f1-aafec8822b21\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.737181 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/566b97e6-6ab0-415c-80f1-aafec8822b21-serving-cert\") pod \"route-controller-manager-ccbd48f84-lrd7d\" (UID: \"566b97e6-6ab0-415c-80f1-aafec8822b21\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.737508 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kz6zv\" (UniqueName: \"kubernetes.io/projected/566b97e6-6ab0-415c-80f1-aafec8822b21-kube-api-access-kz6zv\") pod \"route-controller-manager-ccbd48f84-lrd7d\" (UID: \"566b97e6-6ab0-415c-80f1-aafec8822b21\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.737562 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/566b97e6-6ab0-415c-80f1-aafec8822b21-config\") pod \"route-controller-manager-ccbd48f84-lrd7d\" (UID: \"566b97e6-6ab0-415c-80f1-aafec8822b21\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.737597 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/566b97e6-6ab0-415c-80f1-aafec8822b21-client-ca\") pod \"route-controller-manager-ccbd48f84-lrd7d\" (UID: \"566b97e6-6ab0-415c-80f1-aafec8822b21\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.738479 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/566b97e6-6ab0-415c-80f1-aafec8822b21-client-ca\") pod \"route-controller-manager-ccbd48f84-lrd7d\" (UID: \"566b97e6-6ab0-415c-80f1-aafec8822b21\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.739320 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/566b97e6-6ab0-415c-80f1-aafec8822b21-config\") pod \"route-controller-manager-ccbd48f84-lrd7d\" (UID: \"566b97e6-6ab0-415c-80f1-aafec8822b21\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.750934 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/566b97e6-6ab0-415c-80f1-aafec8822b21-serving-cert\") pod \"route-controller-manager-ccbd48f84-lrd7d\" (UID: \"566b97e6-6ab0-415c-80f1-aafec8822b21\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.753828 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kz6zv\" (UniqueName: \"kubernetes.io/projected/566b97e6-6ab0-415c-80f1-aafec8822b21-kube-api-access-kz6zv\") pod \"route-controller-manager-ccbd48f84-lrd7d\" (UID: \"566b97e6-6ab0-415c-80f1-aafec8822b21\") " pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"
Jan 27 20:08:50 crc kubenswrapper[4793]: I0127 20:08:50.907222 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"
Jan 27 20:08:51 crc kubenswrapper[4793]: I0127 20:08:51.093976 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"]
Jan 27 20:08:51 crc kubenswrapper[4793]: I0127 20:08:51.810536 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29330d1b-bfa4-40ed-8423-78377d21b43c" path="/var/lib/kubelet/pods/29330d1b-bfa4-40ed-8423-78377d21b43c/volumes"
Jan 27 20:08:51 crc kubenswrapper[4793]: I0127 20:08:51.844557 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d" event={"ID":"566b97e6-6ab0-415c-80f1-aafec8822b21","Type":"ContainerStarted","Data":"93f9bc247bb6e493a08a860016c3e41aef250554a215c265ec12b4e0f7735cb6"}
Jan 27 20:08:51 crc kubenswrapper[4793]: I0127 20:08:51.844604 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d" event={"ID":"566b97e6-6ab0-415c-80f1-aafec8822b21","Type":"ContainerStarted","Data":"52689b3f6ca5cc41fae2c8bcff1dbd25a0d95833512109b47f0164fbfc2ac198"}
Jan 27 20:08:51 crc kubenswrapper[4793]: I0127 20:08:51.844902 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"
Jan 27 20:08:51 crc kubenswrapper[4793]: I0127 20:08:51.852055 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d"
Jan 27 20:08:51 crc kubenswrapper[4793]: I0127 20:08:51.871891 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-ccbd48f84-lrd7d" podStartSLOduration=2.871871303 podStartE2EDuration="2.871871303s" podCreationTimestamp="2026-01-27 20:08:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:08:51.869379782 +0000 UTC m=+357.259632938" watchObservedRunningTime="2026-01-27 20:08:51.871871303 +0000 UTC m=+357.262124459"
Jan 27 20:08:52 crc kubenswrapper[4793]: I0127 20:08:52.754195 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 20:08:52 crc kubenswrapper[4793]: I0127 20:08:52.754629 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.623258 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-nqzlb"]
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.624711 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.638929 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-nqzlb"]
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.740576 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0ede7f50-6746-417f-a057-629383bfcae7-trusted-ca\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.740629 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0ede7f50-6746-417f-a057-629383bfcae7-registry-tls\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.740676 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0ede7f50-6746-417f-a057-629383bfcae7-bound-sa-token\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.740701 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0ede7f50-6746-417f-a057-629383bfcae7-ca-trust-extracted\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.740731 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0ede7f50-6746-417f-a057-629383bfcae7-installation-pull-secrets\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.740765 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndfcb\" (UniqueName: \"kubernetes.io/projected/0ede7f50-6746-417f-a057-629383bfcae7-kube-api-access-ndfcb\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.740806 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.740858 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0ede7f50-6746-417f-a057-629383bfcae7-registry-certificates\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.764528 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.841762 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0ede7f50-6746-417f-a057-629383bfcae7-trusted-ca\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.841836 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0ede7f50-6746-417f-a057-629383bfcae7-registry-tls\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.841879 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0ede7f50-6746-417f-a057-629383bfcae7-bound-sa-token\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.841904 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0ede7f50-6746-417f-a057-629383bfcae7-ca-trust-extracted\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.841936 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0ede7f50-6746-417f-a057-629383bfcae7-installation-pull-secrets\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.841978 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndfcb\" (UniqueName: \"kubernetes.io/projected/0ede7f50-6746-417f-a057-629383bfcae7-kube-api-access-ndfcb\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.842211 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0ede7f50-6746-417f-a057-629383bfcae7-registry-certificates\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.843410 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0ede7f50-6746-417f-a057-629383bfcae7-ca-trust-extracted\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.844930 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0ede7f50-6746-417f-a057-629383bfcae7-trusted-ca\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.847446 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0ede7f50-6746-417f-a057-629383bfcae7-installation-pull-secrets\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.847663 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0ede7f50-6746-417f-a057-629383bfcae7-registry-tls\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.855027 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0ede7f50-6746-417f-a057-629383bfcae7-registry-certificates\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.859353 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndfcb\" (UniqueName: \"kubernetes.io/projected/0ede7f50-6746-417f-a057-629383bfcae7-kube-api-access-ndfcb\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.860154 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0ede7f50-6746-417f-a057-629383bfcae7-bound-sa-token\") pod \"image-registry-66df7c8f76-nqzlb\" (UID: \"0ede7f50-6746-417f-a057-629383bfcae7\") " pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:08 crc kubenswrapper[4793]: I0127 20:09:08.995112 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:09 crc kubenswrapper[4793]: I0127 20:09:09.324920 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq"]
Jan 27 20:09:09 crc kubenswrapper[4793]: I0127 20:09:09.325771 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" podUID="f9efa2d6-648d-4cc8-a1c9-961c041ff618" containerName="controller-manager" containerID="cri-o://680fc6e984c936b5e89fcb966799f7a8ed4388976278484a32a932bb898ed642" gracePeriod=30
Jan 27 20:09:09 crc kubenswrapper[4793]: I0127 20:09:09.706010 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-nqzlb"]
Jan 27 20:09:09 crc kubenswrapper[4793]: W0127 20:09:09.733691 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0ede7f50_6746_417f_a057_629383bfcae7.slice/crio-b806217fa78d7bb8efc63551d2465d42bac93ce83c7db9c94029abac9bbeef1b WatchSource:0}: Error finding container b806217fa78d7bb8efc63551d2465d42bac93ce83c7db9c94029abac9bbeef1b: Status 404 returned error can't find the container with id b806217fa78d7bb8efc63551d2465d42bac93ce83c7db9c94029abac9bbeef1b
Jan 27 20:09:09 crc kubenswrapper[4793]: I0127 20:09:09.882921 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq"
Jan 27 20:09:09 crc kubenswrapper[4793]: I0127 20:09:09.981623 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9efa2d6-648d-4cc8-a1c9-961c041ff618-serving-cert\") pod \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") "
Jan 27 20:09:09 crc kubenswrapper[4793]: I0127 20:09:09.981687 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-proxy-ca-bundles\") pod \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") "
Jan 27 20:09:09 crc kubenswrapper[4793]: I0127 20:09:09.981735 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-client-ca\") pod \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") "
Jan 27 20:09:09 crc kubenswrapper[4793]: I0127 20:09:09.981777 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-config\") pod \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") "
Jan 27 20:09:09 crc kubenswrapper[4793]: I0127 20:09:09.981804 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2q6q\" (UniqueName: \"kubernetes.io/projected/f9efa2d6-648d-4cc8-a1c9-961c041ff618-kube-api-access-c2q6q\") pod \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\" (UID: \"f9efa2d6-648d-4cc8-a1c9-961c041ff618\") "
Jan 27 20:09:09 crc kubenswrapper[4793]: I0127 20:09:09.982989 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-client-ca" (OuterVolumeSpecName: "client-ca") pod "f9efa2d6-648d-4cc8-a1c9-961c041ff618" (UID: "f9efa2d6-648d-4cc8-a1c9-961c041ff618"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:09:09 crc kubenswrapper[4793]: I0127 20:09:09.983473 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-config" (OuterVolumeSpecName: "config") pod "f9efa2d6-648d-4cc8-a1c9-961c041ff618" (UID: "f9efa2d6-648d-4cc8-a1c9-961c041ff618"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:09:09 crc kubenswrapper[4793]: I0127 20:09:09.983523 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "f9efa2d6-648d-4cc8-a1c9-961c041ff618" (UID: "f9efa2d6-648d-4cc8-a1c9-961c041ff618"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:09:09 crc kubenswrapper[4793]: I0127 20:09:09.988234 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9efa2d6-648d-4cc8-a1c9-961c041ff618-kube-api-access-c2q6q" (OuterVolumeSpecName: "kube-api-access-c2q6q") pod "f9efa2d6-648d-4cc8-a1c9-961c041ff618" (UID: "f9efa2d6-648d-4cc8-a1c9-961c041ff618"). InnerVolumeSpecName "kube-api-access-c2q6q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:09:09 crc kubenswrapper[4793]: I0127 20:09:09.988315 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9efa2d6-648d-4cc8-a1c9-961c041ff618-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f9efa2d6-648d-4cc8-a1c9-961c041ff618" (UID: "f9efa2d6-648d-4cc8-a1c9-961c041ff618"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.083161 4793 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9efa2d6-648d-4cc8-a1c9-961c041ff618-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.083206 4793 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.083221 4793 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-client-ca\") on node \"crc\" DevicePath \"\""
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.083236 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9efa2d6-648d-4cc8-a1c9-961c041ff618-config\") on node \"crc\" DevicePath \"\""
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.083247 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2q6q\" (UniqueName: \"kubernetes.io/projected/f9efa2d6-648d-4cc8-a1c9-961c041ff618-kube-api-access-c2q6q\") on node \"crc\" DevicePath \"\""
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.163335 4793 generic.go:334] "Generic (PLEG): container finished" podID="f9efa2d6-648d-4cc8-a1c9-961c041ff618" containerID="680fc6e984c936b5e89fcb966799f7a8ed4388976278484a32a932bb898ed642" exitCode=0
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.163393 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.163413 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" event={"ID":"f9efa2d6-648d-4cc8-a1c9-961c041ff618","Type":"ContainerDied","Data":"680fc6e984c936b5e89fcb966799f7a8ed4388976278484a32a932bb898ed642"}
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.163859 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq" event={"ID":"f9efa2d6-648d-4cc8-a1c9-961c041ff618","Type":"ContainerDied","Data":"4dc0959bbb45d40d629ed036f089dc3bd883a7091c84d77eaf98cf4f3302adda"}
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.163882 4793 scope.go:117] "RemoveContainer" containerID="680fc6e984c936b5e89fcb966799f7a8ed4388976278484a32a932bb898ed642"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.166979 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb" event={"ID":"0ede7f50-6746-417f-a057-629383bfcae7","Type":"ContainerStarted","Data":"8c5e33d8044746b47abd8e960f20e1852adea61dd6eba01885707a5b14136a92"}
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.167019 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb" event={"ID":"0ede7f50-6746-417f-a057-629383bfcae7","Type":"ContainerStarted","Data":"b806217fa78d7bb8efc63551d2465d42bac93ce83c7db9c94029abac9bbeef1b"}
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.167336 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.186199 4793 scope.go:117] "RemoveContainer" containerID="680fc6e984c936b5e89fcb966799f7a8ed4388976278484a32a932bb898ed642"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.190592 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb" podStartSLOduration=2.190572012 podStartE2EDuration="2.190572012s" podCreationTimestamp="2026-01-27 20:09:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:09:10.187397585 +0000 UTC m=+375.577650751" watchObservedRunningTime="2026-01-27 20:09:10.190572012 +0000 UTC m=+375.580825158"
Jan 27 20:09:10 crc kubenswrapper[4793]: E0127 20:09:10.190934 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"680fc6e984c936b5e89fcb966799f7a8ed4388976278484a32a932bb898ed642\": container with ID starting with 680fc6e984c936b5e89fcb966799f7a8ed4388976278484a32a932bb898ed642 not found: ID does not exist" containerID="680fc6e984c936b5e89fcb966799f7a8ed4388976278484a32a932bb898ed642"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.191088 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"680fc6e984c936b5e89fcb966799f7a8ed4388976278484a32a932bb898ed642"} err="failed to get container status \"680fc6e984c936b5e89fcb966799f7a8ed4388976278484a32a932bb898ed642\": rpc error: code = NotFound desc = could not find container \"680fc6e984c936b5e89fcb966799f7a8ed4388976278484a32a932bb898ed642\": container with ID starting with 680fc6e984c936b5e89fcb966799f7a8ed4388976278484a32a932bb898ed642 not found: ID does not exist"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.200842 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq"]
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.204253 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6dfcd5c5b4-797wq"]
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.611791 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"]
Jan 27 20:09:10 crc kubenswrapper[4793]: E0127 20:09:10.612057 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9efa2d6-648d-4cc8-a1c9-961c041ff618" containerName="controller-manager"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.612071 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9efa2d6-648d-4cc8-a1c9-961c041ff618" containerName="controller-manager"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.612183 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9efa2d6-648d-4cc8-a1c9-961c041ff618" containerName="controller-manager"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.612610 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.615337 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.620654 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.623780 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.623961 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.624784 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.625303 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.625395 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.627028 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57a1afbd-5e30-4210-a1de-5d3aaf13b469-config\") pod \"controller-manager-68ff5ff7df-2gg52\" (UID: \"57a1afbd-5e30-4210-a1de-5d3aaf13b469\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.627105 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/57a1afbd-5e30-4210-a1de-5d3aaf13b469-client-ca\") pod \"controller-manager-68ff5ff7df-2gg52\" (UID: \"57a1afbd-5e30-4210-a1de-5d3aaf13b469\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.627137 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/57a1afbd-5e30-4210-a1de-5d3aaf13b469-proxy-ca-bundles\") pod \"controller-manager-68ff5ff7df-2gg52\" (UID: \"57a1afbd-5e30-4210-a1de-5d3aaf13b469\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.627160 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjj2q\" (UniqueName: \"kubernetes.io/projected/57a1afbd-5e30-4210-a1de-5d3aaf13b469-kube-api-access-kjj2q\") pod \"controller-manager-68ff5ff7df-2gg52\" (UID: \"57a1afbd-5e30-4210-a1de-5d3aaf13b469\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.627197 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/57a1afbd-5e30-4210-a1de-5d3aaf13b469-serving-cert\") pod \"controller-manager-68ff5ff7df-2gg52\" (UID: \"57a1afbd-5e30-4210-a1de-5d3aaf13b469\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.643364 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"]
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.728633 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjj2q\" (UniqueName: \"kubernetes.io/projected/57a1afbd-5e30-4210-a1de-5d3aaf13b469-kube-api-access-kjj2q\") pod \"controller-manager-68ff5ff7df-2gg52\" (UID: \"57a1afbd-5e30-4210-a1de-5d3aaf13b469\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.729626 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/57a1afbd-5e30-4210-a1de-5d3aaf13b469-serving-cert\") pod \"controller-manager-68ff5ff7df-2gg52\" (UID: \"57a1afbd-5e30-4210-a1de-5d3aaf13b469\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.729802 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57a1afbd-5e30-4210-a1de-5d3aaf13b469-config\") pod \"controller-manager-68ff5ff7df-2gg52\" (UID: \"57a1afbd-5e30-4210-a1de-5d3aaf13b469\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.729927 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/57a1afbd-5e30-4210-a1de-5d3aaf13b469-client-ca\") pod \"controller-manager-68ff5ff7df-2gg52\" (UID: \"57a1afbd-5e30-4210-a1de-5d3aaf13b469\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.730064 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/57a1afbd-5e30-4210-a1de-5d3aaf13b469-proxy-ca-bundles\") pod \"controller-manager-68ff5ff7df-2gg52\" (UID: \"57a1afbd-5e30-4210-a1de-5d3aaf13b469\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.731478 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57a1afbd-5e30-4210-a1de-5d3aaf13b469-config\") pod \"controller-manager-68ff5ff7df-2gg52\" (UID: \"57a1afbd-5e30-4210-a1de-5d3aaf13b469\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.731589 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/57a1afbd-5e30-4210-a1de-5d3aaf13b469-proxy-ca-bundles\") pod \"controller-manager-68ff5ff7df-2gg52\" (UID: \"57a1afbd-5e30-4210-a1de-5d3aaf13b469\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.731728 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/57a1afbd-5e30-4210-a1de-5d3aaf13b469-client-ca\") pod \"controller-manager-68ff5ff7df-2gg52\" (UID: \"57a1afbd-5e30-4210-a1de-5d3aaf13b469\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.744455 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/57a1afbd-5e30-4210-a1de-5d3aaf13b469-serving-cert\") pod \"controller-manager-68ff5ff7df-2gg52\" (UID: \"57a1afbd-5e30-4210-a1de-5d3aaf13b469\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.746926 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjj2q\" (UniqueName: \"kubernetes.io/projected/57a1afbd-5e30-4210-a1de-5d3aaf13b469-kube-api-access-kjj2q\") pod \"controller-manager-68ff5ff7df-2gg52\" (UID: \"57a1afbd-5e30-4210-a1de-5d3aaf13b469\") " pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:10 crc kubenswrapper[4793]: I0127 20:09:10.949798 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:11 crc kubenswrapper[4793]: I0127 20:09:11.290991 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"]
Jan 27 20:09:11 crc kubenswrapper[4793]: W0127 20:09:11.300614 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57a1afbd_5e30_4210_a1de_5d3aaf13b469.slice/crio-6f4873c141cc60d763083d71fcdf8f9983e31d772006eb621939db1ba72da154 WatchSource:0}: Error finding container 6f4873c141cc60d763083d71fcdf8f9983e31d772006eb621939db1ba72da154: Status 404 returned error can't find the container with id 6f4873c141cc60d763083d71fcdf8f9983e31d772006eb621939db1ba72da154
Jan 27 20:09:11 crc kubenswrapper[4793]: I0127 20:09:11.811140 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9efa2d6-648d-4cc8-a1c9-961c041ff618" path="/var/lib/kubelet/pods/f9efa2d6-648d-4cc8-a1c9-961c041ff618/volumes"
Jan 27 20:09:12 crc kubenswrapper[4793]: I0127 20:09:12.213484 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52" event={"ID":"57a1afbd-5e30-4210-a1de-5d3aaf13b469","Type":"ContainerStarted","Data":"191fdf8a13179918d093752e8164683baacc610ce06ce42356a6486d21149ece"}
Jan 27 20:09:12 crc kubenswrapper[4793]: I0127 20:09:12.213523 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52" event={"ID":"57a1afbd-5e30-4210-a1de-5d3aaf13b469","Type":"ContainerStarted","Data":"6f4873c141cc60d763083d71fcdf8f9983e31d772006eb621939db1ba72da154"}
Jan 27 20:09:12 crc kubenswrapper[4793]: I0127 20:09:12.214009 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:12 crc kubenswrapper[4793]: I0127 20:09:12.229398 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52" podStartSLOduration=3.229384656 podStartE2EDuration="3.229384656s" podCreationTimestamp="2026-01-27 20:09:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:09:12.228180486 +0000 UTC m=+377.618433642" watchObservedRunningTime="2026-01-27 20:09:12.229384656 +0000 UTC m=+377.619637802"
Jan 27 20:09:12 crc kubenswrapper[4793]: I0127 20:09:12.233715 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-68ff5ff7df-2gg52"
Jan 27 20:09:12 crc kubenswrapper[4793]: I0127 20:09:12.680014 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2lmqb"]
Jan 27 20:09:12 crc kubenswrapper[4793]: I0127 20:09:12.680497 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2lmqb" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" containerName="registry-server" containerID="cri-o://a087f85b6c2bb28a2484f4779411ca8ce7d91b9e49012a8e1da1c405f2caf8dc" gracePeriod=2
Jan 27 20:09:13 crc kubenswrapper[4793]: I0127 20:09:13.359207 4793 generic.go:334] "Generic (PLEG): container finished" podID="f7506fff-3cb5-42dd-80c3-203b1354c70d" containerID="a087f85b6c2bb28a2484f4779411ca8ce7d91b9e49012a8e1da1c405f2caf8dc" exitCode=0
Jan 27 20:09:13 crc kubenswrapper[4793]: I0127 20:09:13.359286 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2lmqb" event={"ID":"f7506fff-3cb5-42dd-80c3-203b1354c70d","Type":"ContainerDied","Data":"a087f85b6c2bb28a2484f4779411ca8ce7d91b9e49012a8e1da1c405f2caf8dc"}
Jan 27 20:09:13 crc kubenswrapper[4793]: I0127 20:09:13.575753 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2lmqb"
Jan 27 20:09:13 crc kubenswrapper[4793]: I0127 20:09:13.752399 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vx95k\" (UniqueName: \"kubernetes.io/projected/f7506fff-3cb5-42dd-80c3-203b1354c70d-kube-api-access-vx95k\") pod \"f7506fff-3cb5-42dd-80c3-203b1354c70d\" (UID: \"f7506fff-3cb5-42dd-80c3-203b1354c70d\") "
Jan 27 20:09:13 crc kubenswrapper[4793]: I0127 20:09:13.752632 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7506fff-3cb5-42dd-80c3-203b1354c70d-utilities\") pod \"f7506fff-3cb5-42dd-80c3-203b1354c70d\" (UID: \"f7506fff-3cb5-42dd-80c3-203b1354c70d\") "
Jan 27 20:09:13 crc kubenswrapper[4793]: I0127 20:09:13.752708 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7506fff-3cb5-42dd-80c3-203b1354c70d-catalog-content\") pod \"f7506fff-3cb5-42dd-80c3-203b1354c70d\" (UID: \"f7506fff-3cb5-42dd-80c3-203b1354c70d\") "
Jan 27 20:09:13 crc kubenswrapper[4793]: I0127 20:09:13.753429 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7506fff-3cb5-42dd-80c3-203b1354c70d-utilities" (OuterVolumeSpecName: "utilities") pod "f7506fff-3cb5-42dd-80c3-203b1354c70d" (UID: "f7506fff-3cb5-42dd-80c3-203b1354c70d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:09:13 crc kubenswrapper[4793]: I0127 20:09:13.757331 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7506fff-3cb5-42dd-80c3-203b1354c70d-kube-api-access-vx95k" (OuterVolumeSpecName: "kube-api-access-vx95k") pod "f7506fff-3cb5-42dd-80c3-203b1354c70d" (UID: "f7506fff-3cb5-42dd-80c3-203b1354c70d"). InnerVolumeSpecName "kube-api-access-vx95k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:09:13 crc kubenswrapper[4793]: I0127 20:09:13.854622 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vx95k\" (UniqueName: \"kubernetes.io/projected/f7506fff-3cb5-42dd-80c3-203b1354c70d-kube-api-access-vx95k\") on node \"crc\" DevicePath \"\""
Jan 27 20:09:13 crc kubenswrapper[4793]: I0127 20:09:13.854683 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7506fff-3cb5-42dd-80c3-203b1354c70d-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 20:09:13 crc kubenswrapper[4793]: I0127 20:09:13.883769 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7506fff-3cb5-42dd-80c3-203b1354c70d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f7506fff-3cb5-42dd-80c3-203b1354c70d" (UID: "f7506fff-3cb5-42dd-80c3-203b1354c70d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:09:13 crc kubenswrapper[4793]: I0127 20:09:13.956477 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7506fff-3cb5-42dd-80c3-203b1354c70d-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 20:09:14 crc kubenswrapper[4793]: I0127 20:09:14.365880 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2lmqb"
Jan 27 20:09:14 crc kubenswrapper[4793]: I0127 20:09:14.365890 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2lmqb" event={"ID":"f7506fff-3cb5-42dd-80c3-203b1354c70d","Type":"ContainerDied","Data":"e911b40cfff3c4b8e0fd34892d2b16467bb3be27adf485dec1c0585c2d9f4938"}
Jan 27 20:09:14 crc kubenswrapper[4793]: I0127 20:09:14.366336 4793 scope.go:117] "RemoveContainer" containerID="a087f85b6c2bb28a2484f4779411ca8ce7d91b9e49012a8e1da1c405f2caf8dc"
Jan 27 20:09:14 crc kubenswrapper[4793]: I0127 20:09:14.391149 4793 scope.go:117] "RemoveContainer" containerID="ba3fcb727ed2dc071079d6fc21c91852c4bc5deff9b3c030c67a3eae4f652ebc"
Jan 27 20:09:14 crc kubenswrapper[4793]: I0127 20:09:14.398781 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2lmqb"]
Jan 27 20:09:14 crc kubenswrapper[4793]: I0127 20:09:14.403342 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2lmqb"]
Jan 27 20:09:14 crc kubenswrapper[4793]: I0127 20:09:14.429951 4793 scope.go:117] "RemoveContainer" containerID="0567c91e1e506036e221f2253199d87f41090c09f93c5a922498a1ce3b562f23"
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.084632 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qmlkz"]
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.085146 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qmlkz" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" containerName="registry-server" containerID="cri-o://a6c7e702fdf6fd42a58962286b4e81951b4f6c5e9fff05286a95c2d15d341835" gracePeriod=2
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.285976 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jh9bl"]
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.286268 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jh9bl" podUID="18834635-b900-480e-844b-4c075b169d4a" containerName="registry-server" containerID="cri-o://7414776ee4d5151c8f3e70e447bca56d01e87c3c3ab9e2b5a2fa8efa4a704de2" gracePeriod=2
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.385774 4793 generic.go:334] "Generic (PLEG): container finished" podID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" containerID="a6c7e702fdf6fd42a58962286b4e81951b4f6c5e9fff05286a95c2d15d341835" exitCode=0
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.385831 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmlkz" event={"ID":"345c96d4-a84a-4d09-9d94-f68e4c3bff9b","Type":"ContainerDied","Data":"a6c7e702fdf6fd42a58962286b4e81951b4f6c5e9fff05286a95c2d15d341835"}
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.504298 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qmlkz"
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.685296 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-utilities\") pod \"345c96d4-a84a-4d09-9d94-f68e4c3bff9b\" (UID: \"345c96d4-a84a-4d09-9d94-f68e4c3bff9b\") "
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.685731 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-catalog-content\") pod \"345c96d4-a84a-4d09-9d94-f68e4c3bff9b\" (UID: \"345c96d4-a84a-4d09-9d94-f68e4c3bff9b\") "
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.685754 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sk97f\" (UniqueName: \"kubernetes.io/projected/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-kube-api-access-sk97f\") pod \"345c96d4-a84a-4d09-9d94-f68e4c3bff9b\" (UID: \"345c96d4-a84a-4d09-9d94-f68e4c3bff9b\") "
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.686825 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-utilities" (OuterVolumeSpecName: "utilities") pod "345c96d4-a84a-4d09-9d94-f68e4c3bff9b" (UID: "345c96d4-a84a-4d09-9d94-f68e4c3bff9b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.699189 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-kube-api-access-sk97f" (OuterVolumeSpecName: "kube-api-access-sk97f") pod "345c96d4-a84a-4d09-9d94-f68e4c3bff9b" (UID: "345c96d4-a84a-4d09-9d94-f68e4c3bff9b"). InnerVolumeSpecName "kube-api-access-sk97f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.753967 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "345c96d4-a84a-4d09-9d94-f68e4c3bff9b" (UID: "345c96d4-a84a-4d09-9d94-f68e4c3bff9b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.765714 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jh9bl"
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.790226 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.790274 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.790286 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sk97f\" (UniqueName: \"kubernetes.io/projected/345c96d4-a84a-4d09-9d94-f68e4c3bff9b-kube-api-access-sk97f\") on node \"crc\" DevicePath \"\""
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.816376 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" path="/var/lib/kubelet/pods/f7506fff-3cb5-42dd-80c3-203b1354c70d/volumes"
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.891295 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9btv\" (UniqueName: \"kubernetes.io/projected/18834635-b900-480e-844b-4c075b169d4a-kube-api-access-n9btv\") pod \"18834635-b900-480e-844b-4c075b169d4a\" (UID: \"18834635-b900-480e-844b-4c075b169d4a\") "
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.891589 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18834635-b900-480e-844b-4c075b169d4a-catalog-content\") pod \"18834635-b900-480e-844b-4c075b169d4a\" (UID: \"18834635-b900-480e-844b-4c075b169d4a\") "
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.891642 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18834635-b900-480e-844b-4c075b169d4a-utilities\") pod \"18834635-b900-480e-844b-4c075b169d4a\" (UID: \"18834635-b900-480e-844b-4c075b169d4a\") "
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.892638 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/18834635-b900-480e-844b-4c075b169d4a-utilities" (OuterVolumeSpecName: "utilities") pod "18834635-b900-480e-844b-4c075b169d4a" (UID: "18834635-b900-480e-844b-4c075b169d4a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.894599 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18834635-b900-480e-844b-4c075b169d4a-kube-api-access-n9btv" (OuterVolumeSpecName: "kube-api-access-n9btv") pod "18834635-b900-480e-844b-4c075b169d4a" (UID: "18834635-b900-480e-844b-4c075b169d4a"). InnerVolumeSpecName "kube-api-access-n9btv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.951979 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/18834635-b900-480e-844b-4c075b169d4a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "18834635-b900-480e-844b-4c075b169d4a" (UID: "18834635-b900-480e-844b-4c075b169d4a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.993068 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18834635-b900-480e-844b-4c075b169d4a-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.993104 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18834635-b900-480e-844b-4c075b169d4a-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 20:09:15 crc kubenswrapper[4793]: I0127 20:09:15.993117 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9btv\" (UniqueName: \"kubernetes.io/projected/18834635-b900-480e-844b-4c075b169d4a-kube-api-access-n9btv\") on node \"crc\" DevicePath \"\""
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.406470 4793 generic.go:334] "Generic (PLEG): container finished" podID="18834635-b900-480e-844b-4c075b169d4a" containerID="7414776ee4d5151c8f3e70e447bca56d01e87c3c3ab9e2b5a2fa8efa4a704de2" exitCode=0
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.407667 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jh9bl" event={"ID":"18834635-b900-480e-844b-4c075b169d4a","Type":"ContainerDied","Data":"7414776ee4d5151c8f3e70e447bca56d01e87c3c3ab9e2b5a2fa8efa4a704de2"}
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.407684 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jh9bl"
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.407909 4793 scope.go:117] "RemoveContainer" containerID="7414776ee4d5151c8f3e70e447bca56d01e87c3c3ab9e2b5a2fa8efa4a704de2"
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.407884 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jh9bl" event={"ID":"18834635-b900-480e-844b-4c075b169d4a","Type":"ContainerDied","Data":"556990d3d93a4567156f437f38e11621b8d93e56a13efb41a7fc1a5b1ca6aec3"}
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.411787 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmlkz" event={"ID":"345c96d4-a84a-4d09-9d94-f68e4c3bff9b","Type":"ContainerDied","Data":"3ac637c5af0402a1b0b4f7d7b0ca255e3a2b5a3273be466c597b433ff6844572"}
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.411866 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qmlkz" Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.449263 4793 scope.go:117] "RemoveContainer" containerID="753b12be0f4ee3494a1942bb75d332c644b0f024e9eecf4533057be89b601240" Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.458618 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qmlkz"] Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.471486 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qmlkz"] Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.471608 4793 scope.go:117] "RemoveContainer" containerID="e28895808e9c55135c7186181cb778365c77de4390cf75b859ae2635db4376fe" Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.482602 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jh9bl"] Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.493193 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jh9bl"] Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.500062 4793 scope.go:117] "RemoveContainer" containerID="7414776ee4d5151c8f3e70e447bca56d01e87c3c3ab9e2b5a2fa8efa4a704de2" Jan 27 20:09:16 crc kubenswrapper[4793]: E0127 20:09:16.500610 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7414776ee4d5151c8f3e70e447bca56d01e87c3c3ab9e2b5a2fa8efa4a704de2\": container with ID starting with 7414776ee4d5151c8f3e70e447bca56d01e87c3c3ab9e2b5a2fa8efa4a704de2 not found: ID does not exist" containerID="7414776ee4d5151c8f3e70e447bca56d01e87c3c3ab9e2b5a2fa8efa4a704de2" Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.500753 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7414776ee4d5151c8f3e70e447bca56d01e87c3c3ab9e2b5a2fa8efa4a704de2"} err="failed to get container status \"7414776ee4d5151c8f3e70e447bca56d01e87c3c3ab9e2b5a2fa8efa4a704de2\": rpc error: code = NotFound desc = could not find container \"7414776ee4d5151c8f3e70e447bca56d01e87c3c3ab9e2b5a2fa8efa4a704de2\": container with ID starting with 7414776ee4d5151c8f3e70e447bca56d01e87c3c3ab9e2b5a2fa8efa4a704de2 not found: ID does not exist" Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.500865 4793 scope.go:117] "RemoveContainer" containerID="753b12be0f4ee3494a1942bb75d332c644b0f024e9eecf4533057be89b601240" Jan 27 20:09:16 crc kubenswrapper[4793]: E0127 20:09:16.501396 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"753b12be0f4ee3494a1942bb75d332c644b0f024e9eecf4533057be89b601240\": container with ID starting with 753b12be0f4ee3494a1942bb75d332c644b0f024e9eecf4533057be89b601240 not found: ID does not exist" containerID="753b12be0f4ee3494a1942bb75d332c644b0f024e9eecf4533057be89b601240" Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.501445 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"753b12be0f4ee3494a1942bb75d332c644b0f024e9eecf4533057be89b601240"} err="failed to get container status \"753b12be0f4ee3494a1942bb75d332c644b0f024e9eecf4533057be89b601240\": rpc error: code = NotFound desc = could not find container \"753b12be0f4ee3494a1942bb75d332c644b0f024e9eecf4533057be89b601240\": container with ID starting with 
753b12be0f4ee3494a1942bb75d332c644b0f024e9eecf4533057be89b601240 not found: ID does not exist"
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.501479 4793 scope.go:117] "RemoveContainer" containerID="e28895808e9c55135c7186181cb778365c77de4390cf75b859ae2635db4376fe"
Jan 27 20:09:16 crc kubenswrapper[4793]: E0127 20:09:16.501749 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e28895808e9c55135c7186181cb778365c77de4390cf75b859ae2635db4376fe\": container with ID starting with e28895808e9c55135c7186181cb778365c77de4390cf75b859ae2635db4376fe not found: ID does not exist" containerID="e28895808e9c55135c7186181cb778365c77de4390cf75b859ae2635db4376fe"
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.501852 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e28895808e9c55135c7186181cb778365c77de4390cf75b859ae2635db4376fe"} err="failed to get container status \"e28895808e9c55135c7186181cb778365c77de4390cf75b859ae2635db4376fe\": rpc error: code = NotFound desc = could not find container \"e28895808e9c55135c7186181cb778365c77de4390cf75b859ae2635db4376fe\": container with ID starting with e28895808e9c55135c7186181cb778365c77de4390cf75b859ae2635db4376fe not found: ID does not exist"
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.501939 4793 scope.go:117] "RemoveContainer" containerID="a6c7e702fdf6fd42a58962286b4e81951b4f6c5e9fff05286a95c2d15d341835"
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.520665 4793 scope.go:117] "RemoveContainer" containerID="aeefc2f50d62af58885d28a0eea62a181bd37cb8dd7259ed11851536ef92e34a"
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.538100 4793 scope.go:117] "RemoveContainer" containerID="5e0dfa79b4dc30a7a2e7ff633cffe59d4e9160af5337cf40ca8c9f886d06bd46"
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.777391 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lcdjd"]
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.777702 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lcdjd" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" containerName="registry-server" containerID="cri-o://a53da694149f5f95c0e5e85c152e8f829aba24dcb7c1acac97c30af331a73b63" gracePeriod=30
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.786102 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n2ntn"]
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.786414 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-n2ntn" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" containerName="registry-server" containerID="cri-o://93debde36ad152a78d5a5e980c5a867675829b0ae7c0fda8786bdb7882b5ad77" gracePeriod=30
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.795088 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vbqcn"]
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.795318 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" podUID="1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" containerName="marketplace-operator" containerID="cri-o://fc7a8978ee915d5acdf909d7b613d8479e4927eb34158dffea8b0e0e84f249c4" gracePeriod=30
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.809134 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tcslq"]
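
The repeated "ContainerStatus from runtime service failed ... NotFound" and "DeleteContainer returned error" pairs a few entries back are benign: the kubelet retried RemoveContainer for IDs that CRI-O had already deleted, got NotFound, logged it, and moved on. A sketch of that idempotent-delete pattern follows; RuntimeClient and ErrNotFound are hypothetical stand-ins for a CRI connection and the gRPC NotFound code, not real kubelet or CRI-O types.

package main

import (
	"errors"
	"fmt"
)

// ErrNotFound stands in for the rpc "code = NotFound" errors in the log.
var ErrNotFound = errors.New("container not found")

// RuntimeClient is a hypothetical stand-in for a container runtime connection.
type RuntimeClient interface {
	RemoveContainer(id string) error
}

// removeIfPresent treats NotFound as success, mirroring how cleanup in the
// log continues after "could not find container" errors.
func removeIfPresent(rt RuntimeClient, id string) error {
	if err := rt.RemoveContainer(id); err != nil {
		if errors.Is(err, ErrNotFound) {
			fmt.Printf("container %s already gone; treating removal as done\n", id)
			return nil
		}
		return fmt.Errorf("remove container %s: %w", id, err)
	}
	return nil
}

type fakeRuntime struct{}

func (fakeRuntime) RemoveContainer(id string) error { return ErrNotFound }

func main() {
	// One of the already-deleted container IDs from the entries above.
	if err := removeIfPresent(fakeRuntime{}, "7414776ee4d5151c8f3e70e447bca56d01e87c3c3ab9e2b5a2fa8efa4a704de2"); err != nil {
		fmt.Println("unexpected:", err)
	}
}
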
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.809409 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tcslq" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" containerName="registry-server" containerID="cri-o://6628af0c6148bca8f585541496e94f0e3e256f6329be0fdc93453d1d345d1b98" gracePeriod=30
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.815248 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j7v8p"]
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.815473 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-j7v8p" podUID="ef02211f-9add-4072-aa2d-4df47b879c0d" containerName="registry-server" containerID="cri-o://6d88a635cb013c5b99481cd6f34b491c61da13b41ed0d75cb1be2740101857ff" gracePeriod=30
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.833040 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-m7kkq"]
Jan 27 20:09:16 crc kubenswrapper[4793]: E0127 20:09:16.833721 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" containerName="registry-server"
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.833825 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" containerName="registry-server"
Jan 27 20:09:16 crc kubenswrapper[4793]: E0127 20:09:16.833915 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" containerName="extract-content"
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.833981 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" containerName="extract-content"
Jan 27 20:09:16 crc kubenswrapper[4793]: E0127 20:09:16.834050 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18834635-b900-480e-844b-4c075b169d4a" containerName="extract-content"
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.834136 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="18834635-b900-480e-844b-4c075b169d4a" containerName="extract-content"
Jan 27 20:09:16 crc kubenswrapper[4793]: E0127 20:09:16.834225 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18834635-b900-480e-844b-4c075b169d4a" containerName="registry-server"
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.834293 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="18834635-b900-480e-844b-4c075b169d4a" containerName="registry-server"
Jan 27 20:09:16 crc kubenswrapper[4793]: E0127 20:09:16.834358 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" containerName="extract-content"
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.834454 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" containerName="extract-content"
Jan 27 20:09:16 crc kubenswrapper[4793]: E0127 20:09:16.834518 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" containerName="registry-server"
Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.834669 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" containerName="registry-server"
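
The cpu_manager.go:410 and state_mem.go:107 run above is the CPU manager dropping per-container CPU-set assignments for the three pods that no longer exist; the memory_manager.go:354 entries just below do the same for memory state. A toy illustration of that bookkeeping follows, keyed the way the log identifies state, by pod UID plus container name; the map and the livePods set are invented for the example, since the real managers work against checkpointed state.

package main

import "fmt"

// key mirrors how the entries above identify state: pod UID + container name.
type key struct{ podUID, containerName string }

// removeStaleState drops assignments whose pod is no longer known to the API,
// echoing the "RemoveStaleState: removing container" messages above.
func removeStaleState(assignments map[key]string, livePods map[string]bool) {
	for k := range assignments { // deleting during range is safe in Go
		if !livePods[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", k.podUID, k.containerName)
			delete(assignments, k)
		}
	}
}

func main() {
	assignments := map[key]string{
		{"345c96d4-a84a-4d09-9d94-f68e4c3bff9b", "registry-server"}:      "cpuset 0-1",
		{"92d8b5ca-b574-449a-a93b-43722e02e624", "marketplace-operator"}: "cpuset 2-3",
	}
	live := map[string]bool{"92d8b5ca-b574-449a-a93b-43722e02e624": true}
	removeStaleState(assignments, live)
	fmt.Println("assignments remaining:", len(assignments)) // 1
}
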
podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" containerName="registry-server" Jan 27 20:09:16 crc kubenswrapper[4793]: E0127 20:09:16.834759 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18834635-b900-480e-844b-4c075b169d4a" containerName="extract-utilities" Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.834842 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="18834635-b900-480e-844b-4c075b169d4a" containerName="extract-utilities" Jan 27 20:09:16 crc kubenswrapper[4793]: E0127 20:09:16.834913 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" containerName="extract-utilities" Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.834983 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" containerName="extract-utilities" Jan 27 20:09:16 crc kubenswrapper[4793]: E0127 20:09:16.835063 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" containerName="extract-utilities" Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.835130 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" containerName="extract-utilities" Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.835343 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="18834635-b900-480e-844b-4c075b169d4a" containerName="registry-server" Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.835445 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" containerName="registry-server" Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.835532 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7506fff-3cb5-42dd-80c3-203b1354c70d" containerName="registry-server" Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.836109 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-m7kkq" Jan 27 20:09:16 crc kubenswrapper[4793]: I0127 20:09:16.842295 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-m7kkq"] Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.006651 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/92d8b5ca-b574-449a-a93b-43722e02e624-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-m7kkq\" (UID: \"92d8b5ca-b574-449a-a93b-43722e02e624\") " pod="openshift-marketplace/marketplace-operator-79b997595-m7kkq" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.006951 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jctqg\" (UniqueName: \"kubernetes.io/projected/92d8b5ca-b574-449a-a93b-43722e02e624-kube-api-access-jctqg\") pod \"marketplace-operator-79b997595-m7kkq\" (UID: \"92d8b5ca-b574-449a-a93b-43722e02e624\") " pod="openshift-marketplace/marketplace-operator-79b997595-m7kkq" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.007080 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/92d8b5ca-b574-449a-a93b-43722e02e624-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-m7kkq\" (UID: \"92d8b5ca-b574-449a-a93b-43722e02e624\") " pod="openshift-marketplace/marketplace-operator-79b997595-m7kkq" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.108096 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/92d8b5ca-b574-449a-a93b-43722e02e624-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-m7kkq\" (UID: \"92d8b5ca-b574-449a-a93b-43722e02e624\") " pod="openshift-marketplace/marketplace-operator-79b997595-m7kkq" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.108170 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jctqg\" (UniqueName: \"kubernetes.io/projected/92d8b5ca-b574-449a-a93b-43722e02e624-kube-api-access-jctqg\") pod \"marketplace-operator-79b997595-m7kkq\" (UID: \"92d8b5ca-b574-449a-a93b-43722e02e624\") " pod="openshift-marketplace/marketplace-operator-79b997595-m7kkq" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.108215 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/92d8b5ca-b574-449a-a93b-43722e02e624-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-m7kkq\" (UID: \"92d8b5ca-b574-449a-a93b-43722e02e624\") " pod="openshift-marketplace/marketplace-operator-79b997595-m7kkq" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.110684 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/92d8b5ca-b574-449a-a93b-43722e02e624-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-m7kkq\" (UID: \"92d8b5ca-b574-449a-a93b-43722e02e624\") " pod="openshift-marketplace/marketplace-operator-79b997595-m7kkq" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.115231 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/92d8b5ca-b574-449a-a93b-43722e02e624-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-m7kkq\" (UID: \"92d8b5ca-b574-449a-a93b-43722e02e624\") " pod="openshift-marketplace/marketplace-operator-79b997595-m7kkq" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.131842 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jctqg\" (UniqueName: \"kubernetes.io/projected/92d8b5ca-b574-449a-a93b-43722e02e624-kube-api-access-jctqg\") pod \"marketplace-operator-79b997595-m7kkq\" (UID: \"92d8b5ca-b574-449a-a93b-43722e02e624\") " pod="openshift-marketplace/marketplace-operator-79b997595-m7kkq" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.156171 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-m7kkq" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.353722 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-m7kkq"] Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.420271 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-m7kkq" event={"ID":"92d8b5ca-b574-449a-a93b-43722e02e624","Type":"ContainerStarted","Data":"53dd5f80eea99258115406a6888aae6c00ad9d197ace7d8d5b3472e841696178"} Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.425616 4793 generic.go:334] "Generic (PLEG): container finished" podID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" containerID="93debde36ad152a78d5a5e980c5a867675829b0ae7c0fda8786bdb7882b5ad77" exitCode=0 Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.425661 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n2ntn" event={"ID":"0bcb229d-7351-4a1f-9a61-8c54a7ee039c","Type":"ContainerDied","Data":"93debde36ad152a78d5a5e980c5a867675829b0ae7c0fda8786bdb7882b5ad77"} Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.433896 4793 generic.go:334] "Generic (PLEG): container finished" podID="5493a5b8-666b-4e96-8912-e8ddc28327fe" containerID="6628af0c6148bca8f585541496e94f0e3e256f6329be0fdc93453d1d345d1b98" exitCode=0 Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.433949 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tcslq" event={"ID":"5493a5b8-666b-4e96-8912-e8ddc28327fe","Type":"ContainerDied","Data":"6628af0c6148bca8f585541496e94f0e3e256f6329be0fdc93453d1d345d1b98"} Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.441319 4793 generic.go:334] "Generic (PLEG): container finished" podID="50ff3901-4109-4f4e-9933-20bccf83d99d" containerID="a53da694149f5f95c0e5e85c152e8f829aba24dcb7c1acac97c30af331a73b63" exitCode=0 Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.441414 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lcdjd" event={"ID":"50ff3901-4109-4f4e-9933-20bccf83d99d","Type":"ContainerDied","Data":"a53da694149f5f95c0e5e85c152e8f829aba24dcb7c1acac97c30af331a73b63"} Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.444045 4793 generic.go:334] "Generic (PLEG): container finished" podID="ef02211f-9add-4072-aa2d-4df47b879c0d" containerID="6d88a635cb013c5b99481cd6f34b491c61da13b41ed0d75cb1be2740101857ff" exitCode=0 Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.444118 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j7v8p" 
event={"ID":"ef02211f-9add-4072-aa2d-4df47b879c0d","Type":"ContainerDied","Data":"6d88a635cb013c5b99481cd6f34b491c61da13b41ed0d75cb1be2740101857ff"} Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.449797 4793 generic.go:334] "Generic (PLEG): container finished" podID="1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" containerID="fc7a8978ee915d5acdf909d7b613d8479e4927eb34158dffea8b0e0e84f249c4" exitCode=0 Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.449838 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" event={"ID":"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187","Type":"ContainerDied","Data":"fc7a8978ee915d5acdf909d7b613d8479e4927eb34158dffea8b0e0e84f249c4"} Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.449871 4793 scope.go:117] "RemoveContainer" containerID="4471df472af763cae8ddf7904d3cba15132e1f1e747ad9bf69f21eb469057097" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.668556 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lcdjd" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.739500 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n2ntn" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.761875 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j7v8p" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.765235 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.806458 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tcslq" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.809433 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18834635-b900-480e-844b-4c075b169d4a" path="/var/lib/kubelet/pods/18834635-b900-480e-844b-4c075b169d4a/volumes" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.810997 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="345c96d4-a84a-4d09-9d94-f68e4c3bff9b" path="/var/lib/kubelet/pods/345c96d4-a84a-4d09-9d94-f68e4c3bff9b/volumes" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.835586 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgjqm\" (UniqueName: \"kubernetes.io/projected/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-kube-api-access-rgjqm\") pod \"0bcb229d-7351-4a1f-9a61-8c54a7ee039c\" (UID: \"0bcb229d-7351-4a1f-9a61-8c54a7ee039c\") " Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.835653 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8qvq\" (UniqueName: \"kubernetes.io/projected/50ff3901-4109-4f4e-9933-20bccf83d99d-kube-api-access-f8qvq\") pod \"50ff3901-4109-4f4e-9933-20bccf83d99d\" (UID: \"50ff3901-4109-4f4e-9933-20bccf83d99d\") " Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.835690 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50ff3901-4109-4f4e-9933-20bccf83d99d-catalog-content\") pod \"50ff3901-4109-4f4e-9933-20bccf83d99d\" (UID: \"50ff3901-4109-4f4e-9933-20bccf83d99d\") " Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.835709 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-utilities\") pod \"0bcb229d-7351-4a1f-9a61-8c54a7ee039c\" (UID: \"0bcb229d-7351-4a1f-9a61-8c54a7ee039c\") " Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.835745 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50ff3901-4109-4f4e-9933-20bccf83d99d-utilities\") pod \"50ff3901-4109-4f4e-9933-20bccf83d99d\" (UID: \"50ff3901-4109-4f4e-9933-20bccf83d99d\") " Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.835776 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-catalog-content\") pod \"0bcb229d-7351-4a1f-9a61-8c54a7ee039c\" (UID: \"0bcb229d-7351-4a1f-9a61-8c54a7ee039c\") " Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.836557 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-utilities" (OuterVolumeSpecName: "utilities") pod "0bcb229d-7351-4a1f-9a61-8c54a7ee039c" (UID: "0bcb229d-7351-4a1f-9a61-8c54a7ee039c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.836538 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50ff3901-4109-4f4e-9933-20bccf83d99d-utilities" (OuterVolumeSpecName: "utilities") pod "50ff3901-4109-4f4e-9933-20bccf83d99d" (UID: "50ff3901-4109-4f4e-9933-20bccf83d99d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.864712 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50ff3901-4109-4f4e-9933-20bccf83d99d-kube-api-access-f8qvq" (OuterVolumeSpecName: "kube-api-access-f8qvq") pod "50ff3901-4109-4f4e-9933-20bccf83d99d" (UID: "50ff3901-4109-4f4e-9933-20bccf83d99d"). InnerVolumeSpecName "kube-api-access-f8qvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.865075 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-kube-api-access-rgjqm" (OuterVolumeSpecName: "kube-api-access-rgjqm") pod "0bcb229d-7351-4a1f-9a61-8c54a7ee039c" (UID: "0bcb229d-7351-4a1f-9a61-8c54a7ee039c"). InnerVolumeSpecName "kube-api-access-rgjqm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.902806 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50ff3901-4109-4f4e-9933-20bccf83d99d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "50ff3901-4109-4f4e-9933-20bccf83d99d" (UID: "50ff3901-4109-4f4e-9933-20bccf83d99d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.917158 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0bcb229d-7351-4a1f-9a61-8c54a7ee039c" (UID: "0bcb229d-7351-4a1f-9a61-8c54a7ee039c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.938340 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef02211f-9add-4072-aa2d-4df47b879c0d-utilities\") pod \"ef02211f-9add-4072-aa2d-4df47b879c0d\" (UID: \"ef02211f-9add-4072-aa2d-4df47b879c0d\") " Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.938658 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hb6cz\" (UniqueName: \"kubernetes.io/projected/5493a5b8-666b-4e96-8912-e8ddc28327fe-kube-api-access-hb6cz\") pod \"5493a5b8-666b-4e96-8912-e8ddc28327fe\" (UID: \"5493a5b8-666b-4e96-8912-e8ddc28327fe\") " Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.938829 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-marketplace-operator-metrics\") pod \"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187\" (UID: \"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187\") " Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.938963 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-marketplace-trusted-ca\") pod \"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187\" (UID: \"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187\") " Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.939130 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef02211f-9add-4072-aa2d-4df47b879c0d-utilities" (OuterVolumeSpecName: "utilities") pod "ef02211f-9add-4072-aa2d-4df47b879c0d" (UID: "ef02211f-9add-4072-aa2d-4df47b879c0d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.939283 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x52cs\" (UniqueName: \"kubernetes.io/projected/ef02211f-9add-4072-aa2d-4df47b879c0d-kube-api-access-x52cs\") pod \"ef02211f-9add-4072-aa2d-4df47b879c0d\" (UID: \"ef02211f-9add-4072-aa2d-4df47b879c0d\") " Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.939340 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" (UID: "1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.939587 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef02211f-9add-4072-aa2d-4df47b879c0d-catalog-content\") pod \"ef02211f-9add-4072-aa2d-4df47b879c0d\" (UID: \"ef02211f-9add-4072-aa2d-4df47b879c0d\") " Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.939737 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slr7v\" (UniqueName: \"kubernetes.io/projected/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-kube-api-access-slr7v\") pod \"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187\" (UID: \"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187\") " Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.939857 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5493a5b8-666b-4e96-8912-e8ddc28327fe-catalog-content\") pod \"5493a5b8-666b-4e96-8912-e8ddc28327fe\" (UID: \"5493a5b8-666b-4e96-8912-e8ddc28327fe\") " Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.939954 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5493a5b8-666b-4e96-8912-e8ddc28327fe-utilities\") pod \"5493a5b8-666b-4e96-8912-e8ddc28327fe\" (UID: \"5493a5b8-666b-4e96-8912-e8ddc28327fe\") " Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.940677 4793 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.940774 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8qvq\" (UniqueName: \"kubernetes.io/projected/50ff3901-4109-4f4e-9933-20bccf83d99d-kube-api-access-f8qvq\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.940845 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50ff3901-4109-4f4e-9933-20bccf83d99d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.940928 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.941044 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50ff3901-4109-4f4e-9933-20bccf83d99d-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.941128 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.941209 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef02211f-9add-4072-aa2d-4df47b879c0d-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.941291 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgjqm\" (UniqueName: 
\"kubernetes.io/projected/0bcb229d-7351-4a1f-9a61-8c54a7ee039c-kube-api-access-rgjqm\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.941203 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5493a5b8-666b-4e96-8912-e8ddc28327fe-utilities" (OuterVolumeSpecName: "utilities") pod "5493a5b8-666b-4e96-8912-e8ddc28327fe" (UID: "5493a5b8-666b-4e96-8912-e8ddc28327fe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.942998 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef02211f-9add-4072-aa2d-4df47b879c0d-kube-api-access-x52cs" (OuterVolumeSpecName: "kube-api-access-x52cs") pod "ef02211f-9add-4072-aa2d-4df47b879c0d" (UID: "ef02211f-9add-4072-aa2d-4df47b879c0d"). InnerVolumeSpecName "kube-api-access-x52cs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.943036 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" (UID: "1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.943645 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-kube-api-access-slr7v" (OuterVolumeSpecName: "kube-api-access-slr7v") pod "1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" (UID: "1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187"). InnerVolumeSpecName "kube-api-access-slr7v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.944009 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5493a5b8-666b-4e96-8912-e8ddc28327fe-kube-api-access-hb6cz" (OuterVolumeSpecName: "kube-api-access-hb6cz") pod "5493a5b8-666b-4e96-8912-e8ddc28327fe" (UID: "5493a5b8-666b-4e96-8912-e8ddc28327fe"). InnerVolumeSpecName "kube-api-access-hb6cz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:09:17 crc kubenswrapper[4793]: I0127 20:09:17.966505 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5493a5b8-666b-4e96-8912-e8ddc28327fe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5493a5b8-666b-4e96-8912-e8ddc28327fe" (UID: "5493a5b8-666b-4e96-8912-e8ddc28327fe"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.043258 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x52cs\" (UniqueName: \"kubernetes.io/projected/ef02211f-9add-4072-aa2d-4df47b879c0d-kube-api-access-x52cs\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.043295 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slr7v\" (UniqueName: \"kubernetes.io/projected/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-kube-api-access-slr7v\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.043309 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5493a5b8-666b-4e96-8912-e8ddc28327fe-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.043323 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5493a5b8-666b-4e96-8912-e8ddc28327fe-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.043335 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hb6cz\" (UniqueName: \"kubernetes.io/projected/5493a5b8-666b-4e96-8912-e8ddc28327fe-kube-api-access-hb6cz\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.043347 4793 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.070471 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef02211f-9add-4072-aa2d-4df47b879c0d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ef02211f-9add-4072-aa2d-4df47b879c0d" (UID: "ef02211f-9add-4072-aa2d-4df47b879c0d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.144404 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef02211f-9add-4072-aa2d-4df47b879c0d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.459055 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lcdjd" event={"ID":"50ff3901-4109-4f4e-9933-20bccf83d99d","Type":"ContainerDied","Data":"424fac4d77a4a63c61d10b875ca890f8eb080fba4101b18ec16eca039ec4d6f7"} Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.460658 4793 scope.go:117] "RemoveContainer" containerID="a53da694149f5f95c0e5e85c152e8f829aba24dcb7c1acac97c30af331a73b63" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.459370 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lcdjd" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.461492 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j7v8p" event={"ID":"ef02211f-9add-4072-aa2d-4df47b879c0d","Type":"ContainerDied","Data":"f605db4bf9b8b3b8e2f89054255aa5fb8e480467fc9338ea3364a45c8e795201"} Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.461537 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j7v8p" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.463772 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.463782 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vbqcn" event={"ID":"1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187","Type":"ContainerDied","Data":"6862ce06f7c37bce5e9c8bfbfb54d3d930d9768d3d8b74ef24b2d97eef1fcf4d"} Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.465732 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-m7kkq" event={"ID":"92d8b5ca-b574-449a-a93b-43722e02e624","Type":"ContainerStarted","Data":"00c33f46050718707877a59f47cb47cac9cd9d0b84b7aef2f4fc305c51b02bff"} Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.466497 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-m7kkq" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.467804 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n2ntn" event={"ID":"0bcb229d-7351-4a1f-9a61-8c54a7ee039c","Type":"ContainerDied","Data":"79b699cbe7ef984c47848ac6f9549a3f9058df7f4fe8aa1da65e4c84b10df874"} Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.468063 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n2ntn" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.469667 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tcslq" event={"ID":"5493a5b8-666b-4e96-8912-e8ddc28327fe","Type":"ContainerDied","Data":"d16c4178252403efd4a7f539e92d393fd56d092d6bd43be70405a65fbe6af0b6"} Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.469749 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tcslq" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.471811 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-m7kkq" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.481143 4793 scope.go:117] "RemoveContainer" containerID="d2d2ba29b2fac29092f29c989f0f821d9590dad38119b2ef709c331188da3e73" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.490685 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-m7kkq" podStartSLOduration=2.490669016 podStartE2EDuration="2.490669016s" podCreationTimestamp="2026-01-27 20:09:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:09:18.489267482 +0000 UTC m=+383.879520668" watchObservedRunningTime="2026-01-27 20:09:18.490669016 +0000 UTC m=+383.880922162" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.502147 4793 scope.go:117] "RemoveContainer" containerID="3a4a5e79ec2ee1178188dec61e4fb57a1b40f2ceff6bd8545af8e26ed8355184" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.532674 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vbqcn"] Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.534401 4793 scope.go:117] "RemoveContainer" containerID="6d88a635cb013c5b99481cd6f34b491c61da13b41ed0d75cb1be2740101857ff" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.535409 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vbqcn"] Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.540059 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n2ntn"] Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.543666 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-n2ntn"] Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.556798 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tcslq"] Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.556855 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tcslq"] Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.562692 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lcdjd"] Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.564807 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lcdjd"] Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.571923 4793 scope.go:117] "RemoveContainer" containerID="423ed445f92cf65c218b6d1b3e342ab5f05f9fb9c1d68833c73af6490a1bb6df" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.572536 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j7v8p"] Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.575602 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-j7v8p"] Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.588535 4793 scope.go:117] "RemoveContainer" containerID="dc29d462435bebb6bf49c488f5fa6a0fc79502bf1f2e01c6980531de0d7f2d27" Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 
Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.617568 4793 scope.go:117] "RemoveContainer" containerID="93debde36ad152a78d5a5e980c5a867675829b0ae7c0fda8786bdb7882b5ad77"
Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.630482 4793 scope.go:117] "RemoveContainer" containerID="df2647518f409ee73582987083cdf0650745013fd8c7ca509578caaf2187ae59"
Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.643836 4793 scope.go:117] "RemoveContainer" containerID="804ecdda7805934fd8effe9da766db00a3bb7e762ecb93afa307b9bae38d0eae"
Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.657985 4793 scope.go:117] "RemoveContainer" containerID="6628af0c6148bca8f585541496e94f0e3e256f6329be0fdc93453d1d345d1b98"
Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.670451 4793 scope.go:117] "RemoveContainer" containerID="0141f24bdb4c844649fb8c03b003a6e5065a11bde6a2176ca5ae6be81beea550"
Jan 27 20:09:18 crc kubenswrapper[4793]: I0127 20:09:18.683584 4793 scope.go:117] "RemoveContainer" containerID="819eb6fbed1ba503d50f7093e71b70de43dd474ac1257680a63f552f449911f7"
Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.487183 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jjsf7"]
Jan 27 20:09:19 crc kubenswrapper[4793]: E0127 20:09:19.487665 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" containerName="extract-content"
Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.487681 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" containerName="extract-content"
Jan 27 20:09:19 crc kubenswrapper[4793]: E0127 20:09:19.487691 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" containerName="registry-server"
Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.487699 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" containerName="registry-server"
Jan 27 20:09:19 crc kubenswrapper[4793]: E0127 20:09:19.487709 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" containerName="extract-utilities"
Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.487715 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" containerName="extract-utilities"
Jan 27 20:09:19 crc kubenswrapper[4793]: E0127 20:09:19.487725 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" containerName="registry-server"
Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.487731 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" containerName="registry-server"
Jan 27 20:09:19 crc kubenswrapper[4793]: E0127 20:09:19.487738 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef02211f-9add-4072-aa2d-4df47b879c0d" containerName="extract-content"
Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.487746 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef02211f-9add-4072-aa2d-4df47b879c0d" containerName="extract-content"
Jan 27 20:09:19 crc kubenswrapper[4793]: E0127 20:09:19.487756 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe"
containerName="registry-server" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.487762 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" containerName="registry-server" Jan 27 20:09:19 crc kubenswrapper[4793]: E0127 20:09:19.487772 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef02211f-9add-4072-aa2d-4df47b879c0d" containerName="registry-server" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.487778 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef02211f-9add-4072-aa2d-4df47b879c0d" containerName="registry-server" Jan 27 20:09:19 crc kubenswrapper[4793]: E0127 20:09:19.487786 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef02211f-9add-4072-aa2d-4df47b879c0d" containerName="extract-utilities" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.487792 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef02211f-9add-4072-aa2d-4df47b879c0d" containerName="extract-utilities" Jan 27 20:09:19 crc kubenswrapper[4793]: E0127 20:09:19.487802 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" containerName="extract-utilities" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.487811 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" containerName="extract-utilities" Jan 27 20:09:19 crc kubenswrapper[4793]: E0127 20:09:19.487823 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" containerName="marketplace-operator" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.487830 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" containerName="marketplace-operator" Jan 27 20:09:19 crc kubenswrapper[4793]: E0127 20:09:19.487837 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" containerName="marketplace-operator" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.487843 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" containerName="marketplace-operator" Jan 27 20:09:19 crc kubenswrapper[4793]: E0127 20:09:19.487851 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" containerName="extract-content" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.487857 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" containerName="extract-content" Jan 27 20:09:19 crc kubenswrapper[4793]: E0127 20:09:19.487867 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" containerName="extract-utilities" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.487874 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" containerName="extract-utilities" Jan 27 20:09:19 crc kubenswrapper[4793]: E0127 20:09:19.487883 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" containerName="extract-content" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.487891 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" containerName="extract-content" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.487986 4793 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" containerName="marketplace-operator" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.488001 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" containerName="registry-server" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.488011 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" containerName="marketplace-operator" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.488020 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" containerName="registry-server" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.488030 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" containerName="registry-server" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.488040 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef02211f-9add-4072-aa2d-4df47b879c0d" containerName="registry-server" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.489145 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.494136 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.499530 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jjsf7"] Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.667717 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwqw5\" (UniqueName: \"kubernetes.io/projected/b008ec09-553c-474f-9176-14405d193a65-kube-api-access-mwqw5\") pod \"certified-operators-jjsf7\" (UID: \"b008ec09-553c-474f-9176-14405d193a65\") " pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.667910 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b008ec09-553c-474f-9176-14405d193a65-utilities\") pod \"certified-operators-jjsf7\" (UID: \"b008ec09-553c-474f-9176-14405d193a65\") " pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.667959 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b008ec09-553c-474f-9176-14405d193a65-catalog-content\") pod \"certified-operators-jjsf7\" (UID: \"b008ec09-553c-474f-9176-14405d193a65\") " pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.769657 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwqw5\" (UniqueName: \"kubernetes.io/projected/b008ec09-553c-474f-9176-14405d193a65-kube-api-access-mwqw5\") pod \"certified-operators-jjsf7\" (UID: \"b008ec09-553c-474f-9176-14405d193a65\") " pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.769739 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/b008ec09-553c-474f-9176-14405d193a65-utilities\") pod \"certified-operators-jjsf7\" (UID: \"b008ec09-553c-474f-9176-14405d193a65\") " pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.769801 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b008ec09-553c-474f-9176-14405d193a65-catalog-content\") pod \"certified-operators-jjsf7\" (UID: \"b008ec09-553c-474f-9176-14405d193a65\") " pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.770339 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b008ec09-553c-474f-9176-14405d193a65-catalog-content\") pod \"certified-operators-jjsf7\" (UID: \"b008ec09-553c-474f-9176-14405d193a65\") " pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.770630 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b008ec09-553c-474f-9176-14405d193a65-utilities\") pod \"certified-operators-jjsf7\" (UID: \"b008ec09-553c-474f-9176-14405d193a65\") " pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.788601 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwqw5\" (UniqueName: \"kubernetes.io/projected/b008ec09-553c-474f-9176-14405d193a65-kube-api-access-mwqw5\") pod \"certified-operators-jjsf7\" (UID: \"b008ec09-553c-474f-9176-14405d193a65\") " pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.810422 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0bcb229d-7351-4a1f-9a61-8c54a7ee039c" path="/var/lib/kubelet/pods/0bcb229d-7351-4a1f-9a61-8c54a7ee039c/volumes" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.811128 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187" path="/var/lib/kubelet/pods/1b91dcf5-7155-41f8-b5b0-2f0bdf4c7187/volumes" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.811650 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50ff3901-4109-4f4e-9933-20bccf83d99d" path="/var/lib/kubelet/pods/50ff3901-4109-4f4e-9933-20bccf83d99d/volumes" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.812276 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5493a5b8-666b-4e96-8912-e8ddc28327fe" path="/var/lib/kubelet/pods/5493a5b8-666b-4e96-8912-e8ddc28327fe/volumes" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.813522 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef02211f-9add-4072-aa2d-4df47b879c0d" path="/var/lib/kubelet/pods/ef02211f-9add-4072-aa2d-4df47b879c0d/volumes" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.818843 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.889791 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6z2jf"] Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.890819 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6z2jf"
Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.894187 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 27 20:09:19 crc kubenswrapper[4793]: I0127 20:09:19.917129 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6z2jf"]
Jan 27 20:09:20 crc kubenswrapper[4793]: I0127 20:09:20.042059 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jjsf7"]
Jan 27 20:09:20 crc kubenswrapper[4793]: W0127 20:09:20.048378 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb008ec09_553c_474f_9176_14405d193a65.slice/crio-bc58c924170fe76240d6c7c1d018e1d96e50b10e74dfc5298a8bf79db1e2223e WatchSource:0}: Error finding container bc58c924170fe76240d6c7c1d018e1d96e50b10e74dfc5298a8bf79db1e2223e: Status 404 returned error can't find the container with id bc58c924170fe76240d6c7c1d018e1d96e50b10e74dfc5298a8bf79db1e2223e
Jan 27 20:09:20 crc kubenswrapper[4793]: I0127 20:09:20.073649 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shpvg\" (UniqueName: \"kubernetes.io/projected/722970b7-fdc7-44ab-a809-4e55d8ac772a-kube-api-access-shpvg\") pod \"community-operators-6z2jf\" (UID: \"722970b7-fdc7-44ab-a809-4e55d8ac772a\") " pod="openshift-marketplace/community-operators-6z2jf"
Jan 27 20:09:20 crc kubenswrapper[4793]: I0127 20:09:20.073699 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/722970b7-fdc7-44ab-a809-4e55d8ac772a-catalog-content\") pod \"community-operators-6z2jf\" (UID: \"722970b7-fdc7-44ab-a809-4e55d8ac772a\") " pod="openshift-marketplace/community-operators-6z2jf"
Jan 27 20:09:20 crc kubenswrapper[4793]: I0127 20:09:20.073772 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/722970b7-fdc7-44ab-a809-4e55d8ac772a-utilities\") pod \"community-operators-6z2jf\" (UID: \"722970b7-fdc7-44ab-a809-4e55d8ac772a\") " pod="openshift-marketplace/community-operators-6z2jf"
Jan 27 20:09:20 crc kubenswrapper[4793]: I0127 20:09:20.174514 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/722970b7-fdc7-44ab-a809-4e55d8ac772a-utilities\") pod \"community-operators-6z2jf\" (UID: \"722970b7-fdc7-44ab-a809-4e55d8ac772a\") " pod="openshift-marketplace/community-operators-6z2jf"
Jan 27 20:09:20 crc kubenswrapper[4793]: I0127 20:09:20.174634 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shpvg\" (UniqueName: \"kubernetes.io/projected/722970b7-fdc7-44ab-a809-4e55d8ac772a-kube-api-access-shpvg\") pod \"community-operators-6z2jf\" (UID: \"722970b7-fdc7-44ab-a809-4e55d8ac772a\") " pod="openshift-marketplace/community-operators-6z2jf"
Jan 27 20:09:20 crc kubenswrapper[4793]: I0127 20:09:20.174667 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/722970b7-fdc7-44ab-a809-4e55d8ac772a-catalog-content\") pod \"community-operators-6z2jf\" (UID: \"722970b7-fdc7-44ab-a809-4e55d8ac772a\") " pod="openshift-marketplace/community-operators-6z2jf"
Jan 27 20:09:20 crc kubenswrapper[4793]: I0127 20:09:20.175025 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/722970b7-fdc7-44ab-a809-4e55d8ac772a-utilities\") pod \"community-operators-6z2jf\" (UID: \"722970b7-fdc7-44ab-a809-4e55d8ac772a\") " pod="openshift-marketplace/community-operators-6z2jf"
Jan 27 20:09:20 crc kubenswrapper[4793]: I0127 20:09:20.175094 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/722970b7-fdc7-44ab-a809-4e55d8ac772a-catalog-content\") pod \"community-operators-6z2jf\" (UID: \"722970b7-fdc7-44ab-a809-4e55d8ac772a\") " pod="openshift-marketplace/community-operators-6z2jf"
Jan 27 20:09:20 crc kubenswrapper[4793]: I0127 20:09:20.194775 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shpvg\" (UniqueName: \"kubernetes.io/projected/722970b7-fdc7-44ab-a809-4e55d8ac772a-kube-api-access-shpvg\") pod \"community-operators-6z2jf\" (UID: \"722970b7-fdc7-44ab-a809-4e55d8ac772a\") " pod="openshift-marketplace/community-operators-6z2jf"
Jan 27 20:09:20 crc kubenswrapper[4793]: I0127 20:09:20.210016 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6z2jf"
Jan 27 20:09:20 crc kubenswrapper[4793]: I0127 20:09:20.376705 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6z2jf"]
Jan 27 20:09:20 crc kubenswrapper[4793]: I0127 20:09:20.503083 4793 generic.go:334] "Generic (PLEG): container finished" podID="b008ec09-553c-474f-9176-14405d193a65" containerID="5ca24c2da78c21441326a609298e9a3254d53e94d29dfeed39407d64f2b16125" exitCode=0
Jan 27 20:09:20 crc kubenswrapper[4793]: I0127 20:09:20.503172 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjsf7" event={"ID":"b008ec09-553c-474f-9176-14405d193a65","Type":"ContainerDied","Data":"5ca24c2da78c21441326a609298e9a3254d53e94d29dfeed39407d64f2b16125"}
Jan 27 20:09:20 crc kubenswrapper[4793]: I0127 20:09:20.503220 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjsf7" event={"ID":"b008ec09-553c-474f-9176-14405d193a65","Type":"ContainerStarted","Data":"bc58c924170fe76240d6c7c1d018e1d96e50b10e74dfc5298a8bf79db1e2223e"}
Jan 27 20:09:20 crc kubenswrapper[4793]: I0127 20:09:20.505371 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6z2jf" event={"ID":"722970b7-fdc7-44ab-a809-4e55d8ac772a","Type":"ContainerStarted","Data":"d73bf6c7f3cc42344db33ccd80c8676856f25893ebc1039c790a0011cdd82e17"}
Jan 27 20:09:21 crc kubenswrapper[4793]: I0127 20:09:21.513754 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjsf7" event={"ID":"b008ec09-553c-474f-9176-14405d193a65","Type":"ContainerStarted","Data":"bb28b38607830e661ec67b365ef3c75d56fba4877068c15f480b81515b3e7cd8"}
Jan 27 20:09:21 crc kubenswrapper[4793]: I0127 20:09:21.516793 4793 generic.go:334] "Generic (PLEG): container finished" podID="722970b7-fdc7-44ab-a809-4e55d8ac772a" containerID="823201a2d518b06925c94afa68180c6ecef3eac5f0376ff5b5b224526f54ed57" exitCode=0
Jan 27 20:09:21 crc kubenswrapper[4793]: I0127 20:09:21.516854 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6z2jf" event={"ID":"722970b7-fdc7-44ab-a809-4e55d8ac772a","Type":"ContainerDied","Data":"823201a2d518b06925c94afa68180c6ecef3eac5f0376ff5b5b224526f54ed57"}
Jan 27 20:09:21 crc kubenswrapper[4793]: I0127 20:09:21.687719 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dndx6"]
Jan 27 20:09:21 crc kubenswrapper[4793]: I0127 20:09:21.689040 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dndx6"
Jan 27 20:09:21 crc kubenswrapper[4793]: I0127 20:09:21.693615 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 27 20:09:21 crc kubenswrapper[4793]: I0127 20:09:21.697629 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dndx6"]
Jan 27 20:09:21 crc kubenswrapper[4793]: I0127 20:09:21.798081 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llsqz\" (UniqueName: \"kubernetes.io/projected/b7e8d64f-44af-47e1-a656-34aace0833cf-kube-api-access-llsqz\") pod \"redhat-marketplace-dndx6\" (UID: \"b7e8d64f-44af-47e1-a656-34aace0833cf\") " pod="openshift-marketplace/redhat-marketplace-dndx6"
Jan 27 20:09:21 crc kubenswrapper[4793]: I0127 20:09:21.798154 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7e8d64f-44af-47e1-a656-34aace0833cf-utilities\") pod \"redhat-marketplace-dndx6\" (UID: \"b7e8d64f-44af-47e1-a656-34aace0833cf\") " pod="openshift-marketplace/redhat-marketplace-dndx6"
Jan 27 20:09:21 crc kubenswrapper[4793]: I0127 20:09:21.798212 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7e8d64f-44af-47e1-a656-34aace0833cf-catalog-content\") pod \"redhat-marketplace-dndx6\" (UID: \"b7e8d64f-44af-47e1-a656-34aace0833cf\") " pod="openshift-marketplace/redhat-marketplace-dndx6"
Jan 27 20:09:21 crc kubenswrapper[4793]: I0127 20:09:21.899888 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llsqz\" (UniqueName: \"kubernetes.io/projected/b7e8d64f-44af-47e1-a656-34aace0833cf-kube-api-access-llsqz\") pod \"redhat-marketplace-dndx6\" (UID: \"b7e8d64f-44af-47e1-a656-34aace0833cf\") " pod="openshift-marketplace/redhat-marketplace-dndx6"
Jan 27 20:09:21 crc kubenswrapper[4793]: I0127 20:09:21.899981 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7e8d64f-44af-47e1-a656-34aace0833cf-utilities\") pod \"redhat-marketplace-dndx6\" (UID: \"b7e8d64f-44af-47e1-a656-34aace0833cf\") " pod="openshift-marketplace/redhat-marketplace-dndx6"
Jan 27 20:09:21 crc kubenswrapper[4793]: I0127 20:09:21.900058 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7e8d64f-44af-47e1-a656-34aace0833cf-catalog-content\") pod \"redhat-marketplace-dndx6\" (UID: \"b7e8d64f-44af-47e1-a656-34aace0833cf\") " pod="openshift-marketplace/redhat-marketplace-dndx6"
Jan 27 20:09:21 crc kubenswrapper[4793]: I0127 20:09:21.901018 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7e8d64f-44af-47e1-a656-34aace0833cf-utilities\") pod \"redhat-marketplace-dndx6\" (UID: \"b7e8d64f-44af-47e1-a656-34aace0833cf\") " pod="openshift-marketplace/redhat-marketplace-dndx6"
Jan 27 20:09:21 crc kubenswrapper[4793]: I0127 20:09:21.907194 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7e8d64f-44af-47e1-a656-34aace0833cf-catalog-content\") pod \"redhat-marketplace-dndx6\" (UID: \"b7e8d64f-44af-47e1-a656-34aace0833cf\") " pod="openshift-marketplace/redhat-marketplace-dndx6"
Jan 27 20:09:21 crc kubenswrapper[4793]: I0127 20:09:21.922399 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llsqz\" (UniqueName: \"kubernetes.io/projected/b7e8d64f-44af-47e1-a656-34aace0833cf-kube-api-access-llsqz\") pod \"redhat-marketplace-dndx6\" (UID: \"b7e8d64f-44af-47e1-a656-34aace0833cf\") " pod="openshift-marketplace/redhat-marketplace-dndx6"
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.015712 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dndx6"
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.206995 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dndx6"]
Jan 27 20:09:22 crc kubenswrapper[4793]: W0127 20:09:22.209939 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb7e8d64f_44af_47e1_a656_34aace0833cf.slice/crio-8f6933c5c59e2cf118dd869edec9538a2ead232103d9c1dc0140d7b49563eaf2 WatchSource:0}: Error finding container 8f6933c5c59e2cf118dd869edec9538a2ead232103d9c1dc0140d7b49563eaf2: Status 404 returned error can't find the container with id 8f6933c5c59e2cf118dd869edec9538a2ead232103d9c1dc0140d7b49563eaf2
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.300512 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6vqft"]
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.305742 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6vqft"]
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.305880 4793 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/redhat-operators-6vqft"
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.308972 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.407320 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48ec0076-1321-431f-8d4a-06ab47d87847-utilities\") pod \"redhat-operators-6vqft\" (UID: \"48ec0076-1321-431f-8d4a-06ab47d87847\") " pod="openshift-marketplace/redhat-operators-6vqft"
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.407801 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48ec0076-1321-431f-8d4a-06ab47d87847-catalog-content\") pod \"redhat-operators-6vqft\" (UID: \"48ec0076-1321-431f-8d4a-06ab47d87847\") " pod="openshift-marketplace/redhat-operators-6vqft"
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.407856 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qmtp\" (UniqueName: \"kubernetes.io/projected/48ec0076-1321-431f-8d4a-06ab47d87847-kube-api-access-2qmtp\") pod \"redhat-operators-6vqft\" (UID: \"48ec0076-1321-431f-8d4a-06ab47d87847\") " pod="openshift-marketplace/redhat-operators-6vqft"
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.509202 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48ec0076-1321-431f-8d4a-06ab47d87847-catalog-content\") pod \"redhat-operators-6vqft\" (UID: \"48ec0076-1321-431f-8d4a-06ab47d87847\") " pod="openshift-marketplace/redhat-operators-6vqft"
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.509279 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qmtp\" (UniqueName: \"kubernetes.io/projected/48ec0076-1321-431f-8d4a-06ab47d87847-kube-api-access-2qmtp\") pod \"redhat-operators-6vqft\" (UID: \"48ec0076-1321-431f-8d4a-06ab47d87847\") " pod="openshift-marketplace/redhat-operators-6vqft"
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.509320 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48ec0076-1321-431f-8d4a-06ab47d87847-utilities\") pod \"redhat-operators-6vqft\" (UID: \"48ec0076-1321-431f-8d4a-06ab47d87847\") " pod="openshift-marketplace/redhat-operators-6vqft"
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.509822 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48ec0076-1321-431f-8d4a-06ab47d87847-catalog-content\") pod \"redhat-operators-6vqft\" (UID: \"48ec0076-1321-431f-8d4a-06ab47d87847\") " pod="openshift-marketplace/redhat-operators-6vqft"
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.509874 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48ec0076-1321-431f-8d4a-06ab47d87847-utilities\") pod \"redhat-operators-6vqft\" (UID: \"48ec0076-1321-431f-8d4a-06ab47d87847\") " pod="openshift-marketplace/redhat-operators-6vqft"
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.524889 4793 generic.go:334] "Generic (PLEG): container finished" podID="b7e8d64f-44af-47e1-a656-34aace0833cf" containerID="95a17cc9b1a398ec3ad88daf7b172a64c308e2b37168c1a61e577b4d39c2b3d5" exitCode=0
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.525007 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dndx6" event={"ID":"b7e8d64f-44af-47e1-a656-34aace0833cf","Type":"ContainerDied","Data":"95a17cc9b1a398ec3ad88daf7b172a64c308e2b37168c1a61e577b4d39c2b3d5"}
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.525075 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dndx6" event={"ID":"b7e8d64f-44af-47e1-a656-34aace0833cf","Type":"ContainerStarted","Data":"8f6933c5c59e2cf118dd869edec9538a2ead232103d9c1dc0140d7b49563eaf2"}
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.528902 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qmtp\" (UniqueName: \"kubernetes.io/projected/48ec0076-1321-431f-8d4a-06ab47d87847-kube-api-access-2qmtp\") pod \"redhat-operators-6vqft\" (UID: \"48ec0076-1321-431f-8d4a-06ab47d87847\") " pod="openshift-marketplace/redhat-operators-6vqft"
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.531802 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6z2jf" event={"ID":"722970b7-fdc7-44ab-a809-4e55d8ac772a","Type":"ContainerStarted","Data":"52cf3c70966e13a241d0f5dc447cd4a2d604eaf621542730ad85c61b4dd27470"}
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.536341 4793 generic.go:334] "Generic (PLEG): container finished" podID="b008ec09-553c-474f-9176-14405d193a65" containerID="bb28b38607830e661ec67b365ef3c75d56fba4877068c15f480b81515b3e7cd8" exitCode=0
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.536374 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjsf7" event={"ID":"b008ec09-553c-474f-9176-14405d193a65","Type":"ContainerDied","Data":"bb28b38607830e661ec67b365ef3c75d56fba4877068c15f480b81515b3e7cd8"}
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.643175 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6vqft"
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.753444 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 20:09:22 crc kubenswrapper[4793]: I0127 20:09:22.753875 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 20:09:23 crc kubenswrapper[4793]: I0127 20:09:23.067658 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6vqft"]
Jan 27 20:09:23 crc kubenswrapper[4793]: I0127 20:09:23.545414 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dndx6" event={"ID":"b7e8d64f-44af-47e1-a656-34aace0833cf","Type":"ContainerStarted","Data":"e83115be42f7b6b6e374d33e41810f29566106bc050534af643f2c97fd6d392f"}
Jan 27 20:09:23 crc kubenswrapper[4793]: I0127 20:09:23.547848 4793 generic.go:334] "Generic (PLEG): container finished" podID="722970b7-fdc7-44ab-a809-4e55d8ac772a" containerID="52cf3c70966e13a241d0f5dc447cd4a2d604eaf621542730ad85c61b4dd27470" exitCode=0
Jan 27 20:09:23 crc kubenswrapper[4793]: I0127 20:09:23.547940 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6z2jf" event={"ID":"722970b7-fdc7-44ab-a809-4e55d8ac772a","Type":"ContainerDied","Data":"52cf3c70966e13a241d0f5dc447cd4a2d604eaf621542730ad85c61b4dd27470"}
Jan 27 20:09:23 crc kubenswrapper[4793]: I0127 20:09:23.550088 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjsf7" event={"ID":"b008ec09-553c-474f-9176-14405d193a65","Type":"ContainerStarted","Data":"21aa9d714ae0a321386da7ef447a780192a9faaf6bcb8784c7565f7ab18713b6"}
Jan 27 20:09:23 crc kubenswrapper[4793]: I0127 20:09:23.551777 4793 generic.go:334] "Generic (PLEG): container finished" podID="48ec0076-1321-431f-8d4a-06ab47d87847" containerID="266a149b9c875e49439012438e21db8dcd06d4c608a6a5b3d1a6ac7c4360e508" exitCode=0
Jan 27 20:09:23 crc kubenswrapper[4793]: I0127 20:09:23.551820 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6vqft" event={"ID":"48ec0076-1321-431f-8d4a-06ab47d87847","Type":"ContainerDied","Data":"266a149b9c875e49439012438e21db8dcd06d4c608a6a5b3d1a6ac7c4360e508"}
Jan 27 20:09:23 crc kubenswrapper[4793]: I0127 20:09:23.551851 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6vqft" event={"ID":"48ec0076-1321-431f-8d4a-06ab47d87847","Type":"ContainerStarted","Data":"012467b77d8f9f97b2300c6147a4a469871f838fac23be1dac915b08055711d6"}
Jan 27 20:09:23 crc kubenswrapper[4793]: I0127 20:09:23.614419 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jjsf7" podStartSLOduration=2.037348104 podStartE2EDuration="4.614398713s" podCreationTimestamp="2026-01-27 20:09:19 +0000 UTC" firstStartedPulling="2026-01-27 20:09:20.504951951 +0000 UTC m=+385.895205107" lastFinishedPulling="2026-01-27 20:09:23.08200256 +0000 UTC m=+388.472255716" observedRunningTime="2026-01-27 20:09:23.611376859 +0000 UTC m=+389.001630015" watchObservedRunningTime="2026-01-27 20:09:23.614398713 +0000 UTC m=+389.004651869"
Jan 27 20:09:24 crc kubenswrapper[4793]: I0127 20:09:24.585443 4793 generic.go:334] "Generic (PLEG): container finished" podID="b7e8d64f-44af-47e1-a656-34aace0833cf" containerID="e83115be42f7b6b6e374d33e41810f29566106bc050534af643f2c97fd6d392f" exitCode=0
Jan 27 20:09:24 crc kubenswrapper[4793]: I0127 20:09:24.586070 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dndx6" event={"ID":"b7e8d64f-44af-47e1-a656-34aace0833cf","Type":"ContainerDied","Data":"e83115be42f7b6b6e374d33e41810f29566106bc050534af643f2c97fd6d392f"}
Jan 27 20:09:24 crc kubenswrapper[4793]: I0127 20:09:24.604370 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6z2jf" event={"ID":"722970b7-fdc7-44ab-a809-4e55d8ac772a","Type":"ContainerStarted","Data":"6bb1b0bd832a0c76c7a96ad2ab21fdae49f71936c45ccbdd009762804b886212"}
Jan 27 20:09:24 crc kubenswrapper[4793]: I0127 20:09:24.606648 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6vqft" event={"ID":"48ec0076-1321-431f-8d4a-06ab47d87847","Type":"ContainerStarted","Data":"f7f37952d7c11665dce7dd9ce61b99a84653e675be8c9bafbd0a958e8d192a95"}
Jan 27 20:09:24 crc kubenswrapper[4793]: I0127 20:09:24.677721 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6z2jf" podStartSLOduration=3.01290596 podStartE2EDuration="5.677703412s" podCreationTimestamp="2026-01-27 20:09:19 +0000 UTC" firstStartedPulling="2026-01-27 20:09:21.51913874 +0000 UTC m=+386.909391906" lastFinishedPulling="2026-01-27 20:09:24.183936202 +0000 UTC m=+389.574189358" observedRunningTime="2026-01-27 20:09:24.675428336 +0000 UTC m=+390.065681492" watchObservedRunningTime="2026-01-27 20:09:24.677703412 +0000 UTC m=+390.067956568"
Jan 27 20:09:25 crc kubenswrapper[4793]: I0127 20:09:25.617685 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dndx6" event={"ID":"b7e8d64f-44af-47e1-a656-34aace0833cf","Type":"ContainerStarted","Data":"1f40e08a726f68206d0107e8701792e9602bbbfc22062e42703b9a535de60ea3"}
Jan 27 20:09:25 crc kubenswrapper[4793]: I0127 20:09:25.619821 4793 generic.go:334] "Generic (PLEG): container finished" podID="48ec0076-1321-431f-8d4a-06ab47d87847" containerID="f7f37952d7c11665dce7dd9ce61b99a84653e675be8c9bafbd0a958e8d192a95" exitCode=0
Jan 27 20:09:25 crc kubenswrapper[4793]: I0127 20:09:25.620754 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6vqft" event={"ID":"48ec0076-1321-431f-8d4a-06ab47d87847","Type":"ContainerDied","Data":"f7f37952d7c11665dce7dd9ce61b99a84653e675be8c9bafbd0a958e8d192a95"}
Jan 27 20:09:25 crc kubenswrapper[4793]: I0127 20:09:25.638780 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dndx6" podStartSLOduration=1.857535898 podStartE2EDuration="4.638763584s" podCreationTimestamp="2026-01-27 20:09:21 +0000 UTC" firstStartedPulling="2026-01-27 20:09:22.526831631 +0000 UTC m=+387.917084787" lastFinishedPulling="2026-01-27 20:09:25.308059317 +0000 UTC m=+390.698312473" observedRunningTime="2026-01-27 20:09:25.636726374 +0000 UTC m=+391.026979520" watchObservedRunningTime="2026-01-27 20:09:25.638763584 +0000 UTC
m=+391.029016740" Jan 27 20:09:26 crc kubenswrapper[4793]: I0127 20:09:26.628172 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6vqft" event={"ID":"48ec0076-1321-431f-8d4a-06ab47d87847","Type":"ContainerStarted","Data":"a5e51ae8d3ffef715937068c18bdb3e7c1c8c59525c959c29aa385f264f85622"} Jan 27 20:09:26 crc kubenswrapper[4793]: I0127 20:09:26.654497 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6vqft" podStartSLOduration=2.035203899 podStartE2EDuration="4.654477869s" podCreationTimestamp="2026-01-27 20:09:22 +0000 UTC" firstStartedPulling="2026-01-27 20:09:23.553003114 +0000 UTC m=+388.943256270" lastFinishedPulling="2026-01-27 20:09:26.172277084 +0000 UTC m=+391.562530240" observedRunningTime="2026-01-27 20:09:26.652031469 +0000 UTC m=+392.042284625" watchObservedRunningTime="2026-01-27 20:09:26.654477869 +0000 UTC m=+392.044731035" Jan 27 20:09:29 crc kubenswrapper[4793]: I0127 20:09:29.157908 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-nqzlb" Jan 27 20:09:29 crc kubenswrapper[4793]: I0127 20:09:29.208726 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v8ht5"] Jan 27 20:09:29 crc kubenswrapper[4793]: I0127 20:09:29.819154 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:09:29 crc kubenswrapper[4793]: I0127 20:09:29.819205 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:09:29 crc kubenswrapper[4793]: I0127 20:09:29.871114 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:09:30 crc kubenswrapper[4793]: I0127 20:09:30.211171 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6z2jf" Jan 27 20:09:30 crc kubenswrapper[4793]: I0127 20:09:30.211221 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6z2jf" Jan 27 20:09:30 crc kubenswrapper[4793]: I0127 20:09:30.254402 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6z2jf" Jan 27 20:09:30 crc kubenswrapper[4793]: I0127 20:09:30.687343 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:09:30 crc kubenswrapper[4793]: I0127 20:09:30.704759 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6z2jf" Jan 27 20:09:32 crc kubenswrapper[4793]: I0127 20:09:32.016120 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dndx6" Jan 27 20:09:32 crc kubenswrapper[4793]: I0127 20:09:32.017427 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dndx6" Jan 27 20:09:32 crc kubenswrapper[4793]: I0127 20:09:32.059271 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dndx6" Jan 27 20:09:32 crc kubenswrapper[4793]: I0127 20:09:32.643936 4793 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6vqft" Jan 27 20:09:32 crc kubenswrapper[4793]: I0127 20:09:32.644695 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6vqft" Jan 27 20:09:32 crc kubenswrapper[4793]: I0127 20:09:32.691995 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6vqft" Jan 27 20:09:32 crc kubenswrapper[4793]: I0127 20:09:32.703598 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dndx6" Jan 27 20:09:33 crc kubenswrapper[4793]: I0127 20:09:33.709229 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6vqft" Jan 27 20:09:52 crc kubenswrapper[4793]: I0127 20:09:52.753944 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:09:52 crc kubenswrapper[4793]: I0127 20:09:52.754622 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:09:52 crc kubenswrapper[4793]: I0127 20:09:52.754684 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:09:52 crc kubenswrapper[4793]: I0127 20:09:52.755386 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5ad0190dc6c6ef5802a60f92533256b89a77bdfc77d69410a9c08fd1d53d4085"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 20:09:52 crc kubenswrapper[4793]: I0127 20:09:52.755445 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://5ad0190dc6c6ef5802a60f92533256b89a77bdfc77d69410a9c08fd1d53d4085" gracePeriod=600 Jan 27 20:09:53 crc kubenswrapper[4793]: I0127 20:09:53.812448 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="5ad0190dc6c6ef5802a60f92533256b89a77bdfc77d69410a9c08fd1d53d4085" exitCode=0 Jan 27 20:09:53 crc kubenswrapper[4793]: I0127 20:09:53.816253 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"5ad0190dc6c6ef5802a60f92533256b89a77bdfc77d69410a9c08fd1d53d4085"} Jan 27 20:09:53 crc kubenswrapper[4793]: I0127 20:09:53.816316 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"a08ed91fe096dea1257ab8ed2eedc19a010c60955e37fc267fc3369890a4e5c4"} Jan 27 
20:09:53 crc kubenswrapper[4793]: I0127 20:09:53.816346 4793 scope.go:117] "RemoveContainer" containerID="6e6786b6ca5606c23ad8a2fd56dac878791320e2be15c08e6ce0ebcad61e505b" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.285598 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" podUID="28a34749-2dfc-4164-a7b9-016f47e098cd" containerName="registry" containerID="cri-o://b9c79b5d9e9adf0088ece156abe6738b1c5d30faca94d115bdd7d5ae3dedcb34" gracePeriod=30 Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.603376 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.764202 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/28a34749-2dfc-4164-a7b9-016f47e098cd-trusted-ca\") pod \"28a34749-2dfc-4164-a7b9-016f47e098cd\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.764245 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/28a34749-2dfc-4164-a7b9-016f47e098cd-registry-certificates\") pod \"28a34749-2dfc-4164-a7b9-016f47e098cd\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.764521 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"28a34749-2dfc-4164-a7b9-016f47e098cd\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.764583 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-bound-sa-token\") pod \"28a34749-2dfc-4164-a7b9-016f47e098cd\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.764613 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-registry-tls\") pod \"28a34749-2dfc-4164-a7b9-016f47e098cd\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.764630 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/28a34749-2dfc-4164-a7b9-016f47e098cd-ca-trust-extracted\") pod \"28a34749-2dfc-4164-a7b9-016f47e098cd\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.764714 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wcgms\" (UniqueName: \"kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-kube-api-access-wcgms\") pod \"28a34749-2dfc-4164-a7b9-016f47e098cd\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.764742 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/28a34749-2dfc-4164-a7b9-016f47e098cd-installation-pull-secrets\") 
pod \"28a34749-2dfc-4164-a7b9-016f47e098cd\" (UID: \"28a34749-2dfc-4164-a7b9-016f47e098cd\") " Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.765484 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28a34749-2dfc-4164-a7b9-016f47e098cd-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "28a34749-2dfc-4164-a7b9-016f47e098cd" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.765576 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28a34749-2dfc-4164-a7b9-016f47e098cd-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "28a34749-2dfc-4164-a7b9-016f47e098cd" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.770905 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "28a34749-2dfc-4164-a7b9-016f47e098cd" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.771041 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "28a34749-2dfc-4164-a7b9-016f47e098cd" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.773138 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28a34749-2dfc-4164-a7b9-016f47e098cd-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "28a34749-2dfc-4164-a7b9-016f47e098cd" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.776370 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-kube-api-access-wcgms" (OuterVolumeSpecName: "kube-api-access-wcgms") pod "28a34749-2dfc-4164-a7b9-016f47e098cd" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd"). InnerVolumeSpecName "kube-api-access-wcgms". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.783404 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "28a34749-2dfc-4164-a7b9-016f47e098cd" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.786155 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28a34749-2dfc-4164-a7b9-016f47e098cd-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "28a34749-2dfc-4164-a7b9-016f47e098cd" (UID: "28a34749-2dfc-4164-a7b9-016f47e098cd"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.824483 4793 generic.go:334] "Generic (PLEG): container finished" podID="28a34749-2dfc-4164-a7b9-016f47e098cd" containerID="b9c79b5d9e9adf0088ece156abe6738b1c5d30faca94d115bdd7d5ae3dedcb34" exitCode=0 Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.824521 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" event={"ID":"28a34749-2dfc-4164-a7b9-016f47e098cd","Type":"ContainerDied","Data":"b9c79b5d9e9adf0088ece156abe6738b1c5d30faca94d115bdd7d5ae3dedcb34"} Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.824542 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" event={"ID":"28a34749-2dfc-4164-a7b9-016f47e098cd","Type":"ContainerDied","Data":"f40a9da6d2ca279328efe414b2196f624a546c3940cc690e62d940c9c65e0bcf"} Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.824583 4793 scope.go:117] "RemoveContainer" containerID="b9c79b5d9e9adf0088ece156abe6738b1c5d30faca94d115bdd7d5ae3dedcb34" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.824659 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-v8ht5" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.846618 4793 scope.go:117] "RemoveContainer" containerID="b9c79b5d9e9adf0088ece156abe6738b1c5d30faca94d115bdd7d5ae3dedcb34" Jan 27 20:09:54 crc kubenswrapper[4793]: E0127 20:09:54.848425 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b9c79b5d9e9adf0088ece156abe6738b1c5d30faca94d115bdd7d5ae3dedcb34\": container with ID starting with b9c79b5d9e9adf0088ece156abe6738b1c5d30faca94d115bdd7d5ae3dedcb34 not found: ID does not exist" containerID="b9c79b5d9e9adf0088ece156abe6738b1c5d30faca94d115bdd7d5ae3dedcb34" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.848497 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b9c79b5d9e9adf0088ece156abe6738b1c5d30faca94d115bdd7d5ae3dedcb34"} err="failed to get container status \"b9c79b5d9e9adf0088ece156abe6738b1c5d30faca94d115bdd7d5ae3dedcb34\": rpc error: code = NotFound desc = could not find container \"b9c79b5d9e9adf0088ece156abe6738b1c5d30faca94d115bdd7d5ae3dedcb34\": container with ID starting with b9c79b5d9e9adf0088ece156abe6738b1c5d30faca94d115bdd7d5ae3dedcb34 not found: ID does not exist" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.856527 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v8ht5"] Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.859915 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v8ht5"] Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.865863 4793 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-wcgms\" (UniqueName: \"kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-kube-api-access-wcgms\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.865888 4793 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/28a34749-2dfc-4164-a7b9-016f47e098cd-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.865897 4793 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/28a34749-2dfc-4164-a7b9-016f47e098cd-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.865908 4793 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/28a34749-2dfc-4164-a7b9-016f47e098cd-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.865924 4793 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.865941 4793 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/28a34749-2dfc-4164-a7b9-016f47e098cd-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:54 crc kubenswrapper[4793]: I0127 20:09:54.865952 4793 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/28a34749-2dfc-4164-a7b9-016f47e098cd-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 27 20:09:55 crc kubenswrapper[4793]: I0127 20:09:55.813390 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28a34749-2dfc-4164-a7b9-016f47e098cd" path="/var/lib/kubelet/pods/28a34749-2dfc-4164-a7b9-016f47e098cd/volumes" Jan 27 20:12:22 crc kubenswrapper[4793]: I0127 20:12:22.754070 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:12:22 crc kubenswrapper[4793]: I0127 20:12:22.754657 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:12:52 crc kubenswrapper[4793]: I0127 20:12:52.753346 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:12:52 crc kubenswrapper[4793]: I0127 20:12:52.753992 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 
20:13:22 crc kubenswrapper[4793]: I0127 20:13:22.754006 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:13:22 crc kubenswrapper[4793]: I0127 20:13:22.754630 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:13:22 crc kubenswrapper[4793]: I0127 20:13:22.754700 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:13:22 crc kubenswrapper[4793]: I0127 20:13:22.755484 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a08ed91fe096dea1257ab8ed2eedc19a010c60955e37fc267fc3369890a4e5c4"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 20:13:22 crc kubenswrapper[4793]: I0127 20:13:22.755612 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://a08ed91fe096dea1257ab8ed2eedc19a010c60955e37fc267fc3369890a4e5c4" gracePeriod=600 Jan 27 20:13:24 crc kubenswrapper[4793]: I0127 20:13:24.011763 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="a08ed91fe096dea1257ab8ed2eedc19a010c60955e37fc267fc3369890a4e5c4" exitCode=0 Jan 27 20:13:24 crc kubenswrapper[4793]: I0127 20:13:24.011857 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"a08ed91fe096dea1257ab8ed2eedc19a010c60955e37fc267fc3369890a4e5c4"} Jan 27 20:13:24 crc kubenswrapper[4793]: I0127 20:13:24.012312 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"d44a61fca52544454e89e7b279be6491680e95bc7fe8be3fc3c65a1e94d3f817"} Jan 27 20:13:24 crc kubenswrapper[4793]: I0127 20:13:24.012335 4793 scope.go:117] "RemoveContainer" containerID="5ad0190dc6c6ef5802a60f92533256b89a77bdfc77d69410a9c08fd1d53d4085" Jan 27 20:13:56 crc kubenswrapper[4793]: I0127 20:13:56.952742 4793 scope.go:117] "RemoveContainer" containerID="a48b2acf599910678953475595199d8907e6ea5f0eaf9dd9efe4650149da85ee" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.174504 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d"] Jan 27 20:15:00 crc kubenswrapper[4793]: E0127 20:15:00.175463 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28a34749-2dfc-4164-a7b9-016f47e098cd" containerName="registry" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.175486 4793 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="28a34749-2dfc-4164-a7b9-016f47e098cd" containerName="registry" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.175672 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="28a34749-2dfc-4164-a7b9-016f47e098cd" containerName="registry" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.176144 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.178332 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.178364 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.188215 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d"] Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.191452 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8c25caa3-6c82-4cf8-b868-bafec109e3ea-secret-volume\") pod \"collect-profiles-29492415-4nh4d\" (UID: \"8c25caa3-6c82-4cf8-b868-bafec109e3ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.191528 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8c25caa3-6c82-4cf8-b868-bafec109e3ea-config-volume\") pod \"collect-profiles-29492415-4nh4d\" (UID: \"8c25caa3-6c82-4cf8-b868-bafec109e3ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.191583 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xxqb\" (UniqueName: \"kubernetes.io/projected/8c25caa3-6c82-4cf8-b868-bafec109e3ea-kube-api-access-6xxqb\") pod \"collect-profiles-29492415-4nh4d\" (UID: \"8c25caa3-6c82-4cf8-b868-bafec109e3ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.292127 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8c25caa3-6c82-4cf8-b868-bafec109e3ea-secret-volume\") pod \"collect-profiles-29492415-4nh4d\" (UID: \"8c25caa3-6c82-4cf8-b868-bafec109e3ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.292181 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8c25caa3-6c82-4cf8-b868-bafec109e3ea-config-volume\") pod \"collect-profiles-29492415-4nh4d\" (UID: \"8c25caa3-6c82-4cf8-b868-bafec109e3ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.292204 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xxqb\" (UniqueName: \"kubernetes.io/projected/8c25caa3-6c82-4cf8-b868-bafec109e3ea-kube-api-access-6xxqb\") pod 
\"collect-profiles-29492415-4nh4d\" (UID: \"8c25caa3-6c82-4cf8-b868-bafec109e3ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.293440 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8c25caa3-6c82-4cf8-b868-bafec109e3ea-config-volume\") pod \"collect-profiles-29492415-4nh4d\" (UID: \"8c25caa3-6c82-4cf8-b868-bafec109e3ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.303373 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8c25caa3-6c82-4cf8-b868-bafec109e3ea-secret-volume\") pod \"collect-profiles-29492415-4nh4d\" (UID: \"8c25caa3-6c82-4cf8-b868-bafec109e3ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.313687 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xxqb\" (UniqueName: \"kubernetes.io/projected/8c25caa3-6c82-4cf8-b868-bafec109e3ea-kube-api-access-6xxqb\") pod \"collect-profiles-29492415-4nh4d\" (UID: \"8c25caa3-6c82-4cf8-b868-bafec109e3ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.544884 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d" Jan 27 20:15:00 crc kubenswrapper[4793]: I0127 20:15:00.756961 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d"] Jan 27 20:15:01 crc kubenswrapper[4793]: I0127 20:15:01.541417 4793 generic.go:334] "Generic (PLEG): container finished" podID="8c25caa3-6c82-4cf8-b868-bafec109e3ea" containerID="29b15860166ba63defcc518b77ae7202526164d84e4d3648790c3b16ee6cc210" exitCode=0 Jan 27 20:15:01 crc kubenswrapper[4793]: I0127 20:15:01.541470 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d" event={"ID":"8c25caa3-6c82-4cf8-b868-bafec109e3ea","Type":"ContainerDied","Data":"29b15860166ba63defcc518b77ae7202526164d84e4d3648790c3b16ee6cc210"} Jan 27 20:15:01 crc kubenswrapper[4793]: I0127 20:15:01.541500 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d" event={"ID":"8c25caa3-6c82-4cf8-b868-bafec109e3ea","Type":"ContainerStarted","Data":"d50489a391f61be3b9033d577ffcf81a495fd4b92c2a85966ae845d859f13ae0"} Jan 27 20:15:02 crc kubenswrapper[4793]: I0127 20:15:02.805127 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d" Jan 27 20:15:02 crc kubenswrapper[4793]: I0127 20:15:02.825268 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6xxqb\" (UniqueName: \"kubernetes.io/projected/8c25caa3-6c82-4cf8-b868-bafec109e3ea-kube-api-access-6xxqb\") pod \"8c25caa3-6c82-4cf8-b868-bafec109e3ea\" (UID: \"8c25caa3-6c82-4cf8-b868-bafec109e3ea\") " Jan 27 20:15:02 crc kubenswrapper[4793]: I0127 20:15:02.825377 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8c25caa3-6c82-4cf8-b868-bafec109e3ea-config-volume\") pod \"8c25caa3-6c82-4cf8-b868-bafec109e3ea\" (UID: \"8c25caa3-6c82-4cf8-b868-bafec109e3ea\") " Jan 27 20:15:02 crc kubenswrapper[4793]: I0127 20:15:02.825596 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8c25caa3-6c82-4cf8-b868-bafec109e3ea-secret-volume\") pod \"8c25caa3-6c82-4cf8-b868-bafec109e3ea\" (UID: \"8c25caa3-6c82-4cf8-b868-bafec109e3ea\") " Jan 27 20:15:02 crc kubenswrapper[4793]: I0127 20:15:02.826377 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c25caa3-6c82-4cf8-b868-bafec109e3ea-config-volume" (OuterVolumeSpecName: "config-volume") pod "8c25caa3-6c82-4cf8-b868-bafec109e3ea" (UID: "8c25caa3-6c82-4cf8-b868-bafec109e3ea"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:15:02 crc kubenswrapper[4793]: I0127 20:15:02.830827 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c25caa3-6c82-4cf8-b868-bafec109e3ea-kube-api-access-6xxqb" (OuterVolumeSpecName: "kube-api-access-6xxqb") pod "8c25caa3-6c82-4cf8-b868-bafec109e3ea" (UID: "8c25caa3-6c82-4cf8-b868-bafec109e3ea"). InnerVolumeSpecName "kube-api-access-6xxqb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:15:02 crc kubenswrapper[4793]: I0127 20:15:02.831632 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c25caa3-6c82-4cf8-b868-bafec109e3ea-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8c25caa3-6c82-4cf8-b868-bafec109e3ea" (UID: "8c25caa3-6c82-4cf8-b868-bafec109e3ea"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:15:02 crc kubenswrapper[4793]: I0127 20:15:02.926458 4793 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8c25caa3-6c82-4cf8-b868-bafec109e3ea-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:02 crc kubenswrapper[4793]: I0127 20:15:02.926518 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6xxqb\" (UniqueName: \"kubernetes.io/projected/8c25caa3-6c82-4cf8-b868-bafec109e3ea-kube-api-access-6xxqb\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:02 crc kubenswrapper[4793]: I0127 20:15:02.926532 4793 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8c25caa3-6c82-4cf8-b868-bafec109e3ea-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:03 crc kubenswrapper[4793]: I0127 20:15:03.555768 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d" event={"ID":"8c25caa3-6c82-4cf8-b868-bafec109e3ea","Type":"ContainerDied","Data":"d50489a391f61be3b9033d577ffcf81a495fd4b92c2a85966ae845d859f13ae0"} Jan 27 20:15:03 crc kubenswrapper[4793]: I0127 20:15:03.555830 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d" Jan 27 20:15:03 crc kubenswrapper[4793]: I0127 20:15:03.555833 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d50489a391f61be3b9033d577ffcf81a495fd4b92c2a85966ae845d859f13ae0" Jan 27 20:15:33 crc kubenswrapper[4793]: I0127 20:15:33.286487 4793 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.171787 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-q928d"] Jan 27 20:15:44 crc kubenswrapper[4793]: E0127 20:15:44.172691 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c25caa3-6c82-4cf8-b868-bafec109e3ea" containerName="collect-profiles" Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.172713 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c25caa3-6c82-4cf8-b868-bafec109e3ea" containerName="collect-profiles" Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.172857 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c25caa3-6c82-4cf8-b868-bafec109e3ea" containerName="collect-profiles" Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.173352 4793 util.go:30] "No sandbox for pod can be found. 
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.171787 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-q928d"]
Jan 27 20:15:44 crc kubenswrapper[4793]: E0127 20:15:44.172691 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c25caa3-6c82-4cf8-b868-bafec109e3ea" containerName="collect-profiles"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.172713 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c25caa3-6c82-4cf8-b868-bafec109e3ea" containerName="collect-profiles"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.172857 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c25caa3-6c82-4cf8-b868-bafec109e3ea" containerName="collect-profiles"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.173352 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-q928d"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.176986 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.177240 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.177910 4793 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-8nbmb"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.182938 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-858654f9db-c48q7"]
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.183864 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-c48q7"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.186222 4793 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-rrm96"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.190824 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-q928d"]
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.200610 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-fhq5k"]
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.201618 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-fhq5k"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.204348 4793 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-vjjg6"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.211118 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ct6jp\" (UniqueName: \"kubernetes.io/projected/2b42206d-0339-46d2-9d21-a1486a3b671e-kube-api-access-ct6jp\") pod \"cert-manager-cainjector-cf98fcc89-q928d\" (UID: \"2b42206d-0339-46d2-9d21-a1486a3b671e\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-q928d"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.211193 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkdng\" (UniqueName: \"kubernetes.io/projected/248ec9d6-6acc-4c1e-bd45-9c51293869d1-kube-api-access-pkdng\") pod \"cert-manager-858654f9db-c48q7\" (UID: \"248ec9d6-6acc-4c1e-bd45-9c51293869d1\") " pod="cert-manager/cert-manager-858654f9db-c48q7"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.211246 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d72hd\" (UniqueName: \"kubernetes.io/projected/62328e2f-a41e-4337-993d-d99b5fc3cbc6-kube-api-access-d72hd\") pod \"cert-manager-webhook-687f57d79b-fhq5k\" (UID: \"62328e2f-a41e-4337-993d-d99b5fc3cbc6\") " pod="cert-manager/cert-manager-webhook-687f57d79b-fhq5k"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.221773 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-c48q7"]
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.232591 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-fhq5k"]
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.312163 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d72hd\" (UniqueName: \"kubernetes.io/projected/62328e2f-a41e-4337-993d-d99b5fc3cbc6-kube-api-access-d72hd\") pod \"cert-manager-webhook-687f57d79b-fhq5k\" (UID: \"62328e2f-a41e-4337-993d-d99b5fc3cbc6\") " pod="cert-manager/cert-manager-webhook-687f57d79b-fhq5k"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.312318 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ct6jp\" (UniqueName: \"kubernetes.io/projected/2b42206d-0339-46d2-9d21-a1486a3b671e-kube-api-access-ct6jp\") pod \"cert-manager-cainjector-cf98fcc89-q928d\" (UID: \"2b42206d-0339-46d2-9d21-a1486a3b671e\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-q928d"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.312385 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkdng\" (UniqueName: \"kubernetes.io/projected/248ec9d6-6acc-4c1e-bd45-9c51293869d1-kube-api-access-pkdng\") pod \"cert-manager-858654f9db-c48q7\" (UID: \"248ec9d6-6acc-4c1e-bd45-9c51293869d1\") " pod="cert-manager/cert-manager-858654f9db-c48q7"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.332581 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkdng\" (UniqueName: \"kubernetes.io/projected/248ec9d6-6acc-4c1e-bd45-9c51293869d1-kube-api-access-pkdng\") pod \"cert-manager-858654f9db-c48q7\" (UID: \"248ec9d6-6acc-4c1e-bd45-9c51293869d1\") " pod="cert-manager/cert-manager-858654f9db-c48q7"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.334713 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d72hd\" (UniqueName: \"kubernetes.io/projected/62328e2f-a41e-4337-993d-d99b5fc3cbc6-kube-api-access-d72hd\") pod \"cert-manager-webhook-687f57d79b-fhq5k\" (UID: \"62328e2f-a41e-4337-993d-d99b5fc3cbc6\") " pod="cert-manager/cert-manager-webhook-687f57d79b-fhq5k"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.340185 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ct6jp\" (UniqueName: \"kubernetes.io/projected/2b42206d-0339-46d2-9d21-a1486a3b671e-kube-api-access-ct6jp\") pod \"cert-manager-cainjector-cf98fcc89-q928d\" (UID: \"2b42206d-0339-46d2-9d21-a1486a3b671e\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-q928d"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.495774 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-q928d"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.505234 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-c48q7"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.522660 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-fhq5k"
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.778184 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-fhq5k"]
Jan 27 20:15:44 crc kubenswrapper[4793]: I0127 20:15:44.790376 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 27 20:15:45 crc kubenswrapper[4793]: I0127 20:15:45.033963 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-c48q7"]
Jan 27 20:15:45 crc kubenswrapper[4793]: I0127 20:15:45.040622 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-q928d"]
Jan 27 20:15:45 crc kubenswrapper[4793]: W0127 20:15:45.043437 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b42206d_0339_46d2_9d21_a1486a3b671e.slice/crio-c06c15d118acfd918bbc33984e781ef44fc92fb4d7563360fb72dc6de3694653 WatchSource:0}: Error finding container c06c15d118acfd918bbc33984e781ef44fc92fb4d7563360fb72dc6de3694653: Status 404 returned error can't find the container with id c06c15d118acfd918bbc33984e781ef44fc92fb4d7563360fb72dc6de3694653
Jan 27 20:15:45 crc kubenswrapper[4793]: W0127 20:15:45.044382 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod248ec9d6_6acc_4c1e_bd45_9c51293869d1.slice/crio-1a8636f192623df377d8c955f37bd04be355f160f55c521051806494b2ad1961 WatchSource:0}: Error finding container 1a8636f192623df377d8c955f37bd04be355f160f55c521051806494b2ad1961: Status 404 returned error can't find the container with id 1a8636f192623df377d8c955f37bd04be355f160f55c521051806494b2ad1961
Jan 27 20:15:45 crc kubenswrapper[4793]: I0127 20:15:45.781209 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-q928d" event={"ID":"2b42206d-0339-46d2-9d21-a1486a3b671e","Type":"ContainerStarted","Data":"c06c15d118acfd918bbc33984e781ef44fc92fb4d7563360fb72dc6de3694653"}
Jan 27 20:15:45 crc kubenswrapper[4793]: I0127 20:15:45.783829 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-c48q7" event={"ID":"248ec9d6-6acc-4c1e-bd45-9c51293869d1","Type":"ContainerStarted","Data":"1a8636f192623df377d8c955f37bd04be355f160f55c521051806494b2ad1961"}
Jan 27 20:15:45 crc kubenswrapper[4793]: I0127 20:15:45.785172 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-fhq5k" event={"ID":"62328e2f-a41e-4337-993d-d99b5fc3cbc6","Type":"ContainerStarted","Data":"90635c7f39b9d6718c7860d09d1a4e7566c29e84ea801f975171886477f94778"}
Jan 27 20:15:47 crc kubenswrapper[4793]: I0127 20:15:47.813452 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-fhq5k" event={"ID":"62328e2f-a41e-4337-993d-d99b5fc3cbc6","Type":"ContainerStarted","Data":"f5abc723f85a32162c6896879ce540ef1d786109af1cf140089d15de49272c72"}
Jan 27 20:15:47 crc kubenswrapper[4793]: I0127 20:15:47.813785 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-687f57d79b-fhq5k"
Jan 27 20:15:49 crc kubenswrapper[4793]: I0127 20:15:49.822276 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-q928d" event={"ID":"2b42206d-0339-46d2-9d21-a1486a3b671e","Type":"ContainerStarted","Data":"f8d6238fc972472e84eb7bd114cb49439fe75ce647ba88e21271afdc8e380c4d"}
Jan 27 20:15:49 crc kubenswrapper[4793]: I0127 20:15:49.823929 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-c48q7" event={"ID":"248ec9d6-6acc-4c1e-bd45-9c51293869d1","Type":"ContainerStarted","Data":"faf366c7edeb17fe399fd24d7e0e0ad08a29dbcf272f8da4073345ef1b0da3fe"}
Jan 27 20:15:49 crc kubenswrapper[4793]: I0127 20:15:49.856962 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-q928d" podStartSLOduration=1.987288483 podStartE2EDuration="5.856931616s" podCreationTimestamp="2026-01-27 20:15:44 +0000 UTC" firstStartedPulling="2026-01-27 20:15:45.047328634 +0000 UTC m=+770.437581790" lastFinishedPulling="2026-01-27 20:15:48.916971767 +0000 UTC m=+774.307224923" observedRunningTime="2026-01-27 20:15:49.850582381 +0000 UTC m=+775.240835547" watchObservedRunningTime="2026-01-27 20:15:49.856931616 +0000 UTC m=+775.247184772"
Jan 27 20:15:49 crc kubenswrapper[4793]: I0127 20:15:49.858250 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-687f57d79b-fhq5k" podStartSLOduration=3.158712062 podStartE2EDuration="5.858241408s" podCreationTimestamp="2026-01-27 20:15:44 +0000 UTC" firstStartedPulling="2026-01-27 20:15:44.7898673 +0000 UTC m=+770.180120456" lastFinishedPulling="2026-01-27 20:15:47.489396646 +0000 UTC m=+772.879649802" observedRunningTime="2026-01-27 20:15:47.820418099 +0000 UTC m=+773.210671255" watchObservedRunningTime="2026-01-27 20:15:49.858241408 +0000 UTC m=+775.248494564"
Jan 27 20:15:49 crc kubenswrapper[4793]: I0127 20:15:49.895822 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-858654f9db-c48q7" podStartSLOduration=2.028288066 podStartE2EDuration="5.895783466s" podCreationTimestamp="2026-01-27 20:15:44 +0000 UTC" firstStartedPulling="2026-01-27 20:15:45.047126699 +0000 UTC m=+770.437379855" lastFinishedPulling="2026-01-27 20:15:48.914622099 +0000 UTC m=+774.304875255" observedRunningTime="2026-01-27 20:15:49.89224361 +0000 UTC m=+775.282496766" watchObservedRunningTime="2026-01-27 20:15:49.895783466 +0000 UTC m=+775.286036632"
containerID="cri-o://077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557" gracePeriod=30 Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.493727 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="northd" containerID="cri-o://2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe" gracePeriod=30 Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.493766 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e" gracePeriod=30 Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.493807 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="kube-rbac-proxy-node" containerID="cri-o://00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82" gracePeriod=30 Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.493831 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovn-acl-logging" containerID="cri-o://8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5" gracePeriod=30 Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.557307 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovnkube-controller" containerID="cri-o://2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f" gracePeriod=30 Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.754158 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.754231 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.779364 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovnkube-controller/3.log" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.781645 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovn-acl-logging/0.log" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.782143 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovn-controller/0.log" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.782695 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.834813 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qdcq6"] Jan 27 20:15:52 crc kubenswrapper[4793]: E0127 20:15:52.835055 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovn-acl-logging" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835070 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovn-acl-logging" Jan 27 20:15:52 crc kubenswrapper[4793]: E0127 20:15:52.835084 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="kubecfg-setup" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835097 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="kubecfg-setup" Jan 27 20:15:52 crc kubenswrapper[4793]: E0127 20:15:52.835108 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="sbdb" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835117 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="sbdb" Jan 27 20:15:52 crc kubenswrapper[4793]: E0127 20:15:52.835127 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovnkube-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835135 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovnkube-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: E0127 20:15:52.835147 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovnkube-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835155 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovnkube-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: E0127 20:15:52.835165 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="northd" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835176 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="northd" Jan 27 20:15:52 crc kubenswrapper[4793]: E0127 20:15:52.835186 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="kube-rbac-proxy-ovn-metrics" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835195 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="kube-rbac-proxy-ovn-metrics" Jan 27 20:15:52 crc kubenswrapper[4793]: E0127 20:15:52.835207 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="nbdb" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835214 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="nbdb" Jan 27 20:15:52 crc kubenswrapper[4793]: E0127 20:15:52.835227 4793 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovnkube-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835235 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovnkube-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: E0127 20:15:52.835244 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovn-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835252 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovn-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: E0127 20:15:52.835264 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="kube-rbac-proxy-node" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835272 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="kube-rbac-proxy-node" Jan 27 20:15:52 crc kubenswrapper[4793]: E0127 20:15:52.835280 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovnkube-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835288 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovnkube-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: E0127 20:15:52.835300 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovnkube-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835309 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovnkube-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835452 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovnkube-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835466 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="nbdb" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835477 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="sbdb" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835486 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovnkube-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835498 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="kube-rbac-proxy-node" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835510 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovn-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835538 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovn-acl-logging" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835570 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="northd" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835580 4793 
memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="kube-rbac-proxy-ovn-metrics" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835594 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovnkube-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835818 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovnkube-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.835832 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerName="ovnkube-controller" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.838268 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.847342 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7k9v7_d3e7b749-a397-4db6-8b6e-ddde6b3fdced/kube-multus/2.log" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.848036 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7k9v7_d3e7b749-a397-4db6-8b6e-ddde6b3fdced/kube-multus/1.log" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.848256 4793 generic.go:334] "Generic (PLEG): container finished" podID="d3e7b749-a397-4db6-8b6e-ddde6b3fdced" containerID="b9a77f189b3970dae25374aabc946e103c7d2dca0881cb4c4fc87338fc15237a" exitCode=2 Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.848333 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-7k9v7" event={"ID":"d3e7b749-a397-4db6-8b6e-ddde6b3fdced","Type":"ContainerDied","Data":"b9a77f189b3970dae25374aabc946e103c7d2dca0881cb4c4fc87338fc15237a"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.848401 4793 scope.go:117] "RemoveContainer" containerID="5523d5c8be12ef4b6f5d0b544383354aecb339148cf8dff40faff81570a033dd" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.848850 4793 scope.go:117] "RemoveContainer" containerID="b9a77f189b3970dae25374aabc946e103c7d2dca0881cb4c4fc87338fc15237a" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.852870 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovnkube-controller/3.log" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.856886 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovn-acl-logging/0.log" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857330 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8glmz_4fb300f8-bf40-4c4e-a3e5-4d5149177aae/ovn-controller/0.log" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857658 4793 generic.go:334] "Generic (PLEG): container finished" podID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerID="2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f" exitCode=0 Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857677 4793 generic.go:334] "Generic (PLEG): container finished" podID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerID="07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0" exitCode=0 Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 
20:15:52.857684 4793 generic.go:334] "Generic (PLEG): container finished" podID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerID="077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557" exitCode=0 Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857693 4793 generic.go:334] "Generic (PLEG): container finished" podID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerID="2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe" exitCode=0 Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857701 4793 generic.go:334] "Generic (PLEG): container finished" podID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerID="e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e" exitCode=0 Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857708 4793 generic.go:334] "Generic (PLEG): container finished" podID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerID="00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82" exitCode=0 Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857714 4793 generic.go:334] "Generic (PLEG): container finished" podID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerID="8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5" exitCode=143 Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857721 4793 generic.go:334] "Generic (PLEG): container finished" podID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" containerID="2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec" exitCode=143 Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857741 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerDied","Data":"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857766 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerDied","Data":"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857776 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerDied","Data":"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857786 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerDied","Data":"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857795 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerDied","Data":"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857805 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerDied","Data":"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857815 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
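
The exit codes reported by the PLEG follow the usual Unix convention: 0 for containers that shut down cleanly within the grace period, and 128 + signal number for processes that died from a signal, so 143 = 128 + 15 (SIGTERM) for ovn-acl-logging and ovn-controller, while multus's exitCode=2 above is an ordinary error exit. A small decoder, assuming only that convention:

    // exitcode.go - maps container exit codes to their conventional meaning.
    package main

    import "fmt"

    func decodeExit(code int) string {
        switch {
        case code == 0:
            return "clean exit"
        case code > 128:
            // 128+N means the process was terminated by signal N.
            return fmt.Sprintf("killed by signal %d", code-128)
        default:
            return fmt.Sprintf("error exit %d", code)
        }
    }

    func main() {
        for _, c := range []int{0, 2, 143} {
            fmt.Printf("exitCode=%d -> %s\n", c, decodeExit(c))
        }
    }
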
containerID={"Type":"cri-o","ID":"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857825 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857830 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857835 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857840 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857844 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857849 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857853 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857858 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857863 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857870 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerDied","Data":"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857878 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857884 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857889 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857894 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857899 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857904 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857909 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857915 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857921 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857925 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857932 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerDied","Data":"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857940 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857945 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857950 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857955 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857961 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857965 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857970 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857976 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857981 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857986 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.857992 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" event={"ID":"4fb300f8-bf40-4c4e-a3e5-4d5149177aae","Type":"ContainerDied","Data":"18bc0fc7af540440b2e36178c14dd860d734ce17106f76ee7c845a89fb069dfd"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.858000 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.858006 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.858010 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.858015 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.858019 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.858024 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.858029 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.858034 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.858039 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.858044 4793 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d"} Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.858123 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-8glmz" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.891218 4793 scope.go:117] "RemoveContainer" containerID="2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918084 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-cni-bin\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918135 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-ovn\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918176 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovnkube-config\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918208 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-systemd-units\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918235 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovn-node-metrics-cert\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918230 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918266 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rpdjd\" (UniqueName: \"kubernetes.io/projected/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-kube-api-access-rpdjd\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918269 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918306 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918321 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-run-ovn-kubernetes\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918353 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918388 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-slash\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918446 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-openvswitch\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918461 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-slash" (OuterVolumeSpecName: "host-slash") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918491 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-cni-netd\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918532 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovnkube-script-lib\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918595 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-kubelet\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918640 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-env-overrides\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918688 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-etc-openvswitch\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918717 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-run-netns\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918752 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-systemd\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918786 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-log-socket\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918816 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-var-lib-openvswitch\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918536 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: 
"4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918567 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918724 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918876 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918933 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918939 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918941 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-node-log\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919021 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-var-lib-cni-networks-ovn-kubernetes\") pod \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\" (UID: \"4fb300f8-bf40-4c4e-a3e5-4d5149177aae\") " Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.918970 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-log-socket" (OuterVolumeSpecName: "log-socket") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "log-socket". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919217 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919243 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919267 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-node-log" (OuterVolumeSpecName: "node-log") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919285 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919334 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919469 4793 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919529 4793 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919640 4793 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919659 4793 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919670 4793 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919679 4793 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919689 4793 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919698 4793 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919729 4793 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-slash\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919768 4793 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919790 4793 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919811 4793 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.919878 4793 scope.go:117] "RemoveContainer" containerID="1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 
20:15:52.923918 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.924358 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-kube-api-access-rpdjd" (OuterVolumeSpecName: "kube-api-access-rpdjd") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "kube-api-access-rpdjd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.933896 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "4fb300f8-bf40-4c4e-a3e5-4d5149177aae" (UID: "4fb300f8-bf40-4c4e-a3e5-4d5149177aae"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.941898 4793 scope.go:117] "RemoveContainer" containerID="07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.954087 4793 scope.go:117] "RemoveContainer" containerID="077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.965677 4793 scope.go:117] "RemoveContainer" containerID="2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.976150 4793 scope.go:117] "RemoveContainer" containerID="e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e" Jan 27 20:15:52 crc kubenswrapper[4793]: I0127 20:15:52.988451 4793 scope.go:117] "RemoveContainer" containerID="00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.002752 4793 scope.go:117] "RemoveContainer" containerID="8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.015422 4793 scope.go:117] "RemoveContainer" containerID="2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021330 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sl8mp\" (UniqueName: \"kubernetes.io/projected/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-kube-api-access-sl8mp\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021379 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-systemd-units\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021401 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-run-netns\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021428 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021455 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-var-lib-openvswitch\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021475 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-env-overrides\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021494 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-ovn-node-metrics-cert\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021526 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-cni-bin\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021572 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-run-ovn-kubernetes\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021654 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-slash\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021712 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-kubelet\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 
crc kubenswrapper[4793]: I0127 20:15:53.021736 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-log-socket\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021768 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-node-log\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021782 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-run-systemd\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021804 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-run-ovn\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021842 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-ovnkube-script-lib\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021859 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-etc-openvswitch\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021882 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-ovnkube-config\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021902 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-cni-netd\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.021927 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-run-openvswitch\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.022250 4793 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-log-socket\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.022265 4793 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.022277 4793 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-node-log\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.022286 4793 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.022295 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rpdjd\" (UniqueName: \"kubernetes.io/projected/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-kube-api-access-rpdjd\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.022305 4793 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.022314 4793 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.022323 4793 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4fb300f8-bf40-4c4e-a3e5-4d5149177aae-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.028509 4793 scope.go:117] "RemoveContainer" containerID="8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.042133 4793 scope.go:117] "RemoveContainer" containerID="2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f" Jan 27 20:15:53 crc kubenswrapper[4793]: E0127 20:15:53.042485 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f\": container with ID starting with 2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f not found: ID does not exist" containerID="2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.042519 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f"} err="failed to get container status \"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f\": rpc error: code = NotFound desc = could not find container \"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f\": container with ID starting with 
2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.042898 4793 scope.go:117] "RemoveContainer" containerID="1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5" Jan 27 20:15:53 crc kubenswrapper[4793]: E0127 20:15:53.043265 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5\": container with ID starting with 1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5 not found: ID does not exist" containerID="1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.043285 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5"} err="failed to get container status \"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5\": rpc error: code = NotFound desc = could not find container \"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5\": container with ID starting with 1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.043296 4793 scope.go:117] "RemoveContainer" containerID="07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0" Jan 27 20:15:53 crc kubenswrapper[4793]: E0127 20:15:53.043468 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\": container with ID starting with 07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0 not found: ID does not exist" containerID="07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.043508 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0"} err="failed to get container status \"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\": rpc error: code = NotFound desc = could not find container \"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\": container with ID starting with 07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.043519 4793 scope.go:117] "RemoveContainer" containerID="077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557" Jan 27 20:15:53 crc kubenswrapper[4793]: E0127 20:15:53.043681 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\": container with ID starting with 077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557 not found: ID does not exist" containerID="077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.043697 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557"} err="failed to get container status \"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\": rpc 
error: code = NotFound desc = could not find container \"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\": container with ID starting with 077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.043713 4793 scope.go:117] "RemoveContainer" containerID="2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe" Jan 27 20:15:53 crc kubenswrapper[4793]: E0127 20:15:53.044075 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\": container with ID starting with 2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe not found: ID does not exist" containerID="2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.044101 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe"} err="failed to get container status \"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\": rpc error: code = NotFound desc = could not find container \"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\": container with ID starting with 2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.044115 4793 scope.go:117] "RemoveContainer" containerID="e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e" Jan 27 20:15:53 crc kubenswrapper[4793]: E0127 20:15:53.044618 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\": container with ID starting with e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e not found: ID does not exist" containerID="e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.044639 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e"} err="failed to get container status \"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\": rpc error: code = NotFound desc = could not find container \"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\": container with ID starting with e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.044653 4793 scope.go:117] "RemoveContainer" containerID="00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82" Jan 27 20:15:53 crc kubenswrapper[4793]: E0127 20:15:53.044939 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\": container with ID starting with 00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82 not found: ID does not exist" containerID="00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.044961 4793 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82"} err="failed to get container status \"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\": rpc error: code = NotFound desc = could not find container \"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\": container with ID starting with 00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.045644 4793 scope.go:117] "RemoveContainer" containerID="8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5" Jan 27 20:15:53 crc kubenswrapper[4793]: E0127 20:15:53.045980 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\": container with ID starting with 8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5 not found: ID does not exist" containerID="8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.046051 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5"} err="failed to get container status \"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\": rpc error: code = NotFound desc = could not find container \"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\": container with ID starting with 8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.046086 4793 scope.go:117] "RemoveContainer" containerID="2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec" Jan 27 20:15:53 crc kubenswrapper[4793]: E0127 20:15:53.046460 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\": container with ID starting with 2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec not found: ID does not exist" containerID="2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.046490 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec"} err="failed to get container status \"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\": rpc error: code = NotFound desc = could not find container \"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\": container with ID starting with 2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.046517 4793 scope.go:117] "RemoveContainer" containerID="8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d" Jan 27 20:15:53 crc kubenswrapper[4793]: E0127 20:15:53.046864 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\": container with ID starting with 8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d not found: ID does not exist" 
containerID="8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.046883 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d"} err="failed to get container status \"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\": rpc error: code = NotFound desc = could not find container \"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\": container with ID starting with 8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.046897 4793 scope.go:117] "RemoveContainer" containerID="2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.047090 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f"} err="failed to get container status \"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f\": rpc error: code = NotFound desc = could not find container \"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f\": container with ID starting with 2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.047108 4793 scope.go:117] "RemoveContainer" containerID="1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.047385 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5"} err="failed to get container status \"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5\": rpc error: code = NotFound desc = could not find container \"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5\": container with ID starting with 1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.047405 4793 scope.go:117] "RemoveContainer" containerID="07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.047804 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0"} err="failed to get container status \"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\": rpc error: code = NotFound desc = could not find container \"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\": container with ID starting with 07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.047822 4793 scope.go:117] "RemoveContainer" containerID="077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.050087 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557"} err="failed to get container status \"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\": rpc error: code = NotFound desc = could not find 
container \"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\": container with ID starting with 077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.050109 4793 scope.go:117] "RemoveContainer" containerID="2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.050322 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe"} err="failed to get container status \"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\": rpc error: code = NotFound desc = could not find container \"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\": container with ID starting with 2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.050339 4793 scope.go:117] "RemoveContainer" containerID="e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.050577 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e"} err="failed to get container status \"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\": rpc error: code = NotFound desc = could not find container \"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\": container with ID starting with e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.050592 4793 scope.go:117] "RemoveContainer" containerID="00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.050864 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82"} err="failed to get container status \"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\": rpc error: code = NotFound desc = could not find container \"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\": container with ID starting with 00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.050888 4793 scope.go:117] "RemoveContainer" containerID="8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.051137 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5"} err="failed to get container status \"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\": rpc error: code = NotFound desc = could not find container \"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\": container with ID starting with 8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.051157 4793 scope.go:117] "RemoveContainer" containerID="2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.051347 4793 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec"} err="failed to get container status \"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\": rpc error: code = NotFound desc = could not find container \"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\": container with ID starting with 2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.051365 4793 scope.go:117] "RemoveContainer" containerID="8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.051667 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d"} err="failed to get container status \"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\": rpc error: code = NotFound desc = could not find container \"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\": container with ID starting with 8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.051687 4793 scope.go:117] "RemoveContainer" containerID="2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.051893 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f"} err="failed to get container status \"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f\": rpc error: code = NotFound desc = could not find container \"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f\": container with ID starting with 2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.051910 4793 scope.go:117] "RemoveContainer" containerID="1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.052626 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5"} err="failed to get container status \"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5\": rpc error: code = NotFound desc = could not find container \"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5\": container with ID starting with 1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.052646 4793 scope.go:117] "RemoveContainer" containerID="07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.052955 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0"} err="failed to get container status \"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\": rpc error: code = NotFound desc = could not find container \"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\": container with ID starting with 
07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.052972 4793 scope.go:117] "RemoveContainer" containerID="077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.053232 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557"} err="failed to get container status \"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\": rpc error: code = NotFound desc = could not find container \"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\": container with ID starting with 077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.053250 4793 scope.go:117] "RemoveContainer" containerID="2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.053439 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe"} err="failed to get container status \"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\": rpc error: code = NotFound desc = could not find container \"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\": container with ID starting with 2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.053454 4793 scope.go:117] "RemoveContainer" containerID="e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.053748 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e"} err="failed to get container status \"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\": rpc error: code = NotFound desc = could not find container \"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\": container with ID starting with e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.053764 4793 scope.go:117] "RemoveContainer" containerID="00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.054079 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82"} err="failed to get container status \"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\": rpc error: code = NotFound desc = could not find container \"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\": container with ID starting with 00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.054097 4793 scope.go:117] "RemoveContainer" containerID="8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.054284 4793 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5"} err="failed to get container status \"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\": rpc error: code = NotFound desc = could not find container \"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\": container with ID starting with 8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.054301 4793 scope.go:117] "RemoveContainer" containerID="2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.054482 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec"} err="failed to get container status \"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\": rpc error: code = NotFound desc = could not find container \"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\": container with ID starting with 2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.054500 4793 scope.go:117] "RemoveContainer" containerID="8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.054724 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d"} err="failed to get container status \"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\": rpc error: code = NotFound desc = could not find container \"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\": container with ID starting with 8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.054741 4793 scope.go:117] "RemoveContainer" containerID="2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.054975 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f"} err="failed to get container status \"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f\": rpc error: code = NotFound desc = could not find container \"2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f\": container with ID starting with 2604ae1c98504fdf033681e0c1012f064f9e5b13a9f32d8ae9f744f6b3547b9f not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.054991 4793 scope.go:117] "RemoveContainer" containerID="1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.055194 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5"} err="failed to get container status \"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5\": rpc error: code = NotFound desc = could not find container \"1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5\": container with ID starting with 1baaa5af575d6720b7b27b1309d09e59ba502d732300ce24a54ba9e77c239bc5 not found: ID does not exist" Jan 
27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.055211 4793 scope.go:117] "RemoveContainer" containerID="07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.055426 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0"} err="failed to get container status \"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\": rpc error: code = NotFound desc = could not find container \"07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0\": container with ID starting with 07ebd94ed5e94632f4cefd5ee64c9fa375e827a881492d42edd9278a12677ef0 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.055444 4793 scope.go:117] "RemoveContainer" containerID="077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.055662 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557"} err="failed to get container status \"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\": rpc error: code = NotFound desc = could not find container \"077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557\": container with ID starting with 077b4da442a4d09815cf1e60d09e42f176ac4a473acae00eb7ff150ab31af557 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.055681 4793 scope.go:117] "RemoveContainer" containerID="2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.055872 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe"} err="failed to get container status \"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\": rpc error: code = NotFound desc = could not find container \"2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe\": container with ID starting with 2bc9c03092be3680167a567f2f2444d591be1d55707e7190b7905fa57b469dbe not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.055891 4793 scope.go:117] "RemoveContainer" containerID="e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.056106 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e"} err="failed to get container status \"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\": rpc error: code = NotFound desc = could not find container \"e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e\": container with ID starting with e0453c49e7a2c28223dc4414cd89e3ed2dfa87b8496c47a54214502b84e8cd8e not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.056122 4793 scope.go:117] "RemoveContainer" containerID="00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.056435 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82"} err="failed to get container status 
\"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\": rpc error: code = NotFound desc = could not find container \"00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82\": container with ID starting with 00be2767e0af475baa0afe88023a8a6243fd54b71a4a421e3093d7d0058d6b82 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.056453 4793 scope.go:117] "RemoveContainer" containerID="8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.056637 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5"} err="failed to get container status \"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\": rpc error: code = NotFound desc = could not find container \"8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5\": container with ID starting with 8d019bca176119cccfadba3dceeaf610bf8ff32c13c8f58bc5617aecde9cfae5 not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.056656 4793 scope.go:117] "RemoveContainer" containerID="2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.056874 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec"} err="failed to get container status \"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\": rpc error: code = NotFound desc = could not find container \"2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec\": container with ID starting with 2172907e96f58bdbc0c04d1bf067e05490f12fc40db861f8fc230e690d5cb0ec not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.056892 4793 scope.go:117] "RemoveContainer" containerID="8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.057140 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d"} err="failed to get container status \"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\": rpc error: code = NotFound desc = could not find container \"8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d\": container with ID starting with 8b1a706c4d31e8ab5ce140b050ed7cfd80635bc929f1940167e67848a194a81d not found: ID does not exist" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123405 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-ovnkube-config\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123462 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-cni-netd\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123485 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-run-openvswitch\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123519 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sl8mp\" (UniqueName: \"kubernetes.io/projected/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-kube-api-access-sl8mp\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123541 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-systemd-units\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123582 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-run-netns\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123607 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123632 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-ovn-node-metrics-cert\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123654 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-var-lib-openvswitch\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123674 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-env-overrides\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123705 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-cni-bin\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123725 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-run-ovn-kubernetes\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123748 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-slash\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123770 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-kubelet\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123794 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-log-socket\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123816 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-run-systemd\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123834 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-node-log\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123858 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-run-ovn\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123880 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-ovnkube-script-lib\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123929 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-etc-openvswitch\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.123992 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-etc-openvswitch\") pod 
\"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.124702 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-ovnkube-config\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.124737 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-cni-netd\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.124763 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-run-openvswitch\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.125087 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-systemd-units\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.125117 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-run-netns\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.125139 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.125624 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-kubelet\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.125659 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-run-ovn-kubernetes\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.125681 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-slash\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.125703 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-node-log\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.125724 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-run-systemd\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.125734 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-host-cni-bin\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.125767 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-run-ovn\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.125745 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-var-lib-openvswitch\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.125741 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-log-socket\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.126264 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-ovnkube-script-lib\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.126354 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-env-overrides\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.130139 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-ovn-node-metrics-cert\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.146405 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-sl8mp\" (UniqueName: \"kubernetes.io/projected/288a4e1f-2bf8-4987-9dd4-4d8700c303e0-kube-api-access-sl8mp\") pod \"ovnkube-node-qdcq6\" (UID: \"288a4e1f-2bf8-4987-9dd4-4d8700c303e0\") " pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.155054 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:15:53 crc kubenswrapper[4793]: W0127 20:15:53.170411 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod288a4e1f_2bf8_4987_9dd4_4d8700c303e0.slice/crio-e9d34ec1331c5714a8cb13935cf0460aee3fceac6cd23bd680029d986e5ed92d WatchSource:0}: Error finding container e9d34ec1331c5714a8cb13935cf0460aee3fceac6cd23bd680029d986e5ed92d: Status 404 returned error can't find the container with id e9d34ec1331c5714a8cb13935cf0460aee3fceac6cd23bd680029d986e5ed92d Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.193772 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-8glmz"] Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.198097 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-8glmz"] Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.811404 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fb300f8-bf40-4c4e-a3e5-4d5149177aae" path="/var/lib/kubelet/pods/4fb300f8-bf40-4c4e-a3e5-4d5149177aae/volumes" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.866513 4793 generic.go:334] "Generic (PLEG): container finished" podID="288a4e1f-2bf8-4987-9dd4-4d8700c303e0" containerID="b1d4323c0d9652b9f14fdf09a931b6e17b11776020448f8152f2c3c458609280" exitCode=0 Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.866584 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" event={"ID":"288a4e1f-2bf8-4987-9dd4-4d8700c303e0","Type":"ContainerDied","Data":"b1d4323c0d9652b9f14fdf09a931b6e17b11776020448f8152f2c3c458609280"} Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.866637 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" event={"ID":"288a4e1f-2bf8-4987-9dd4-4d8700c303e0","Type":"ContainerStarted","Data":"e9d34ec1331c5714a8cb13935cf0460aee3fceac6cd23bd680029d986e5ed92d"} Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.868886 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7k9v7_d3e7b749-a397-4db6-8b6e-ddde6b3fdced/kube-multus/2.log" Jan 27 20:15:53 crc kubenswrapper[4793]: I0127 20:15:53.868944 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-7k9v7" event={"ID":"d3e7b749-a397-4db6-8b6e-ddde6b3fdced","Type":"ContainerStarted","Data":"3b7250d298aefd3a20d1bb51bd5b06d7d144ac17429d317e3b6f688d1a504ddd"} Jan 27 20:15:54 crc kubenswrapper[4793]: I0127 20:15:54.525886 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-687f57d79b-fhq5k" Jan 27 20:15:54 crc kubenswrapper[4793]: I0127 20:15:54.877377 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" event={"ID":"288a4e1f-2bf8-4987-9dd4-4d8700c303e0","Type":"ContainerStarted","Data":"9a78c5e4669d9136e1a7ca89f485f1c1ce15f1a93db0097d8c972e3ee6671a90"} Jan 27 20:15:54 crc kubenswrapper[4793]: I0127 
20:15:54.878934 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" event={"ID":"288a4e1f-2bf8-4987-9dd4-4d8700c303e0","Type":"ContainerStarted","Data":"57b13425130967eec922bddaab8473357791108b5eda790c534fef1a98f840ac"} Jan 27 20:15:54 crc kubenswrapper[4793]: I0127 20:15:54.879181 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" event={"ID":"288a4e1f-2bf8-4987-9dd4-4d8700c303e0","Type":"ContainerStarted","Data":"bfedb6e867e27590a7c03c8f28ba2431af789d06201c00fb967fad6f52d30027"} Jan 27 20:15:54 crc kubenswrapper[4793]: I0127 20:15:54.879270 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" event={"ID":"288a4e1f-2bf8-4987-9dd4-4d8700c303e0","Type":"ContainerStarted","Data":"330fc40c4fd1c7e95892ff84098d4bd37b1e106446e61e4f7cd797fa7fb7765e"} Jan 27 20:15:54 crc kubenswrapper[4793]: I0127 20:15:54.879376 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" event={"ID":"288a4e1f-2bf8-4987-9dd4-4d8700c303e0","Type":"ContainerStarted","Data":"5d7841f88a5a27c9dd0fe9c2987c8593e7a026affbc2793819460909650ed914"} Jan 27 20:15:54 crc kubenswrapper[4793]: I0127 20:15:54.879464 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" event={"ID":"288a4e1f-2bf8-4987-9dd4-4d8700c303e0","Type":"ContainerStarted","Data":"9e4671c9d1118e6993206d4833f883e1c9a6877eeaaa7064d2587f0021d6d709"} Jan 27 20:15:56 crc kubenswrapper[4793]: I0127 20:15:56.890358 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" event={"ID":"288a4e1f-2bf8-4987-9dd4-4d8700c303e0","Type":"ContainerStarted","Data":"97416cb1951362025d2fc14ca0b4ae466ab1a86894970741ebf7aa6bf5dc42f9"} Jan 27 20:15:59 crc kubenswrapper[4793]: I0127 20:15:59.910288 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" event={"ID":"288a4e1f-2bf8-4987-9dd4-4d8700c303e0","Type":"ContainerStarted","Data":"531ecfafa66f7b893b11ea35d659ed0bfdf637ade520b7824dcb504ee7540549"} Jan 27 20:16:00 crc kubenswrapper[4793]: I0127 20:16:00.914609 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:16:00 crc kubenswrapper[4793]: I0127 20:16:00.914657 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:16:00 crc kubenswrapper[4793]: I0127 20:16:00.948977 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" podStartSLOduration=8.948957182000001 podStartE2EDuration="8.948957182s" podCreationTimestamp="2026-01-27 20:15:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:16:00.947992968 +0000 UTC m=+786.338246144" watchObservedRunningTime="2026-01-27 20:16:00.948957182 +0000 UTC m=+786.339210338" Jan 27 20:16:00 crc kubenswrapper[4793]: I0127 20:16:00.972897 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:16:01 crc kubenswrapper[4793]: I0127 20:16:01.919617 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:16:01 crc kubenswrapper[4793]: I0127 
20:16:01.948963 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:16:22 crc kubenswrapper[4793]: I0127 20:16:22.754119 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:16:22 crc kubenswrapper[4793]: I0127 20:16:22.754541 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:16:24 crc kubenswrapper[4793]: I0127 20:16:24.716159 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qdcq6" Jan 27 20:16:30 crc kubenswrapper[4793]: I0127 20:16:30.011882 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx"] Jan 27 20:16:30 crc kubenswrapper[4793]: I0127 20:16:30.013963 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" Jan 27 20:16:30 crc kubenswrapper[4793]: I0127 20:16:30.017039 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 27 20:16:30 crc kubenswrapper[4793]: I0127 20:16:30.022849 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx"] Jan 27 20:16:30 crc kubenswrapper[4793]: I0127 20:16:30.091852 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4v5sw\" (UniqueName: \"kubernetes.io/projected/b691d932-0b30-4838-9566-a378435e170d-kube-api-access-4v5sw\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx\" (UID: \"b691d932-0b30-4838-9566-a378435e170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" Jan 27 20:16:30 crc kubenswrapper[4793]: I0127 20:16:30.091926 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b691d932-0b30-4838-9566-a378435e170d-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx\" (UID: \"b691d932-0b30-4838-9566-a378435e170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" Jan 27 20:16:30 crc kubenswrapper[4793]: I0127 20:16:30.092026 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b691d932-0b30-4838-9566-a378435e170d-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx\" (UID: \"b691d932-0b30-4838-9566-a378435e170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" Jan 27 20:16:30 crc kubenswrapper[4793]: I0127 20:16:30.193592 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/b691d932-0b30-4838-9566-a378435e170d-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx\" (UID: \"b691d932-0b30-4838-9566-a378435e170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" Jan 27 20:16:30 crc kubenswrapper[4793]: I0127 20:16:30.193713 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4v5sw\" (UniqueName: \"kubernetes.io/projected/b691d932-0b30-4838-9566-a378435e170d-kube-api-access-4v5sw\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx\" (UID: \"b691d932-0b30-4838-9566-a378435e170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" Jan 27 20:16:30 crc kubenswrapper[4793]: I0127 20:16:30.193751 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b691d932-0b30-4838-9566-a378435e170d-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx\" (UID: \"b691d932-0b30-4838-9566-a378435e170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" Jan 27 20:16:30 crc kubenswrapper[4793]: I0127 20:16:30.194725 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b691d932-0b30-4838-9566-a378435e170d-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx\" (UID: \"b691d932-0b30-4838-9566-a378435e170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" Jan 27 20:16:30 crc kubenswrapper[4793]: I0127 20:16:30.194826 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b691d932-0b30-4838-9566-a378435e170d-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx\" (UID: \"b691d932-0b30-4838-9566-a378435e170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" Jan 27 20:16:30 crc kubenswrapper[4793]: I0127 20:16:30.215827 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4v5sw\" (UniqueName: \"kubernetes.io/projected/b691d932-0b30-4838-9566-a378435e170d-kube-api-access-4v5sw\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx\" (UID: \"b691d932-0b30-4838-9566-a378435e170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" Jan 27 20:16:30 crc kubenswrapper[4793]: I0127 20:16:30.342480 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" Jan 27 20:16:30 crc kubenswrapper[4793]: I0127 20:16:30.609159 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx"] Jan 27 20:16:31 crc kubenswrapper[4793]: I0127 20:16:31.076611 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" event={"ID":"b691d932-0b30-4838-9566-a378435e170d","Type":"ContainerStarted","Data":"716c7ff0a5ed392de29d907df52368cdb270e6961d835a8abe9b8439b683b9fd"} Jan 27 20:16:31 crc kubenswrapper[4793]: I0127 20:16:31.076951 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" event={"ID":"b691d932-0b30-4838-9566-a378435e170d","Type":"ContainerStarted","Data":"d6adb859bf2842c6c3c9a06e8141878c3b56532b5d12995b90e7f147a9bca29c"} Jan 27 20:16:32 crc kubenswrapper[4793]: I0127 20:16:32.082911 4793 generic.go:334] "Generic (PLEG): container finished" podID="b691d932-0b30-4838-9566-a378435e170d" containerID="716c7ff0a5ed392de29d907df52368cdb270e6961d835a8abe9b8439b683b9fd" exitCode=0 Jan 27 20:16:32 crc kubenswrapper[4793]: I0127 20:16:32.082966 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" event={"ID":"b691d932-0b30-4838-9566-a378435e170d","Type":"ContainerDied","Data":"716c7ff0a5ed392de29d907df52368cdb270e6961d835a8abe9b8439b683b9fd"} Jan 27 20:16:32 crc kubenswrapper[4793]: I0127 20:16:32.364423 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6frrg"] Jan 27 20:16:32 crc kubenswrapper[4793]: I0127 20:16:32.365518 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6frrg" Jan 27 20:16:32 crc kubenswrapper[4793]: I0127 20:16:32.384798 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6frrg"] Jan 27 20:16:32 crc kubenswrapper[4793]: I0127 20:16:32.431952 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e32659-2b3c-4d16-8a8c-6442a3298534-catalog-content\") pod \"redhat-operators-6frrg\" (UID: \"15e32659-2b3c-4d16-8a8c-6442a3298534\") " pod="openshift-marketplace/redhat-operators-6frrg" Jan 27 20:16:32 crc kubenswrapper[4793]: I0127 20:16:32.432045 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e32659-2b3c-4d16-8a8c-6442a3298534-utilities\") pod \"redhat-operators-6frrg\" (UID: \"15e32659-2b3c-4d16-8a8c-6442a3298534\") " pod="openshift-marketplace/redhat-operators-6frrg" Jan 27 20:16:32 crc kubenswrapper[4793]: I0127 20:16:32.432449 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vphxn\" (UniqueName: \"kubernetes.io/projected/15e32659-2b3c-4d16-8a8c-6442a3298534-kube-api-access-vphxn\") pod \"redhat-operators-6frrg\" (UID: \"15e32659-2b3c-4d16-8a8c-6442a3298534\") " pod="openshift-marketplace/redhat-operators-6frrg" Jan 27 20:16:32 crc kubenswrapper[4793]: I0127 20:16:32.533918 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vphxn\" (UniqueName: \"kubernetes.io/projected/15e32659-2b3c-4d16-8a8c-6442a3298534-kube-api-access-vphxn\") pod \"redhat-operators-6frrg\" (UID: \"15e32659-2b3c-4d16-8a8c-6442a3298534\") " pod="openshift-marketplace/redhat-operators-6frrg" Jan 27 20:16:32 crc kubenswrapper[4793]: I0127 20:16:32.534052 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e32659-2b3c-4d16-8a8c-6442a3298534-catalog-content\") pod \"redhat-operators-6frrg\" (UID: \"15e32659-2b3c-4d16-8a8c-6442a3298534\") " pod="openshift-marketplace/redhat-operators-6frrg" Jan 27 20:16:32 crc kubenswrapper[4793]: I0127 20:16:32.534083 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e32659-2b3c-4d16-8a8c-6442a3298534-utilities\") pod \"redhat-operators-6frrg\" (UID: \"15e32659-2b3c-4d16-8a8c-6442a3298534\") " pod="openshift-marketplace/redhat-operators-6frrg" Jan 27 20:16:32 crc kubenswrapper[4793]: I0127 20:16:32.534627 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e32659-2b3c-4d16-8a8c-6442a3298534-utilities\") pod \"redhat-operators-6frrg\" (UID: \"15e32659-2b3c-4d16-8a8c-6442a3298534\") " pod="openshift-marketplace/redhat-operators-6frrg" Jan 27 20:16:32 crc kubenswrapper[4793]: I0127 20:16:32.534784 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e32659-2b3c-4d16-8a8c-6442a3298534-catalog-content\") pod \"redhat-operators-6frrg\" (UID: \"15e32659-2b3c-4d16-8a8c-6442a3298534\") " pod="openshift-marketplace/redhat-operators-6frrg" Jan 27 20:16:32 crc kubenswrapper[4793]: I0127 20:16:32.570432 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-vphxn\" (UniqueName: \"kubernetes.io/projected/15e32659-2b3c-4d16-8a8c-6442a3298534-kube-api-access-vphxn\") pod \"redhat-operators-6frrg\" (UID: \"15e32659-2b3c-4d16-8a8c-6442a3298534\") " pod="openshift-marketplace/redhat-operators-6frrg" Jan 27 20:16:32 crc kubenswrapper[4793]: I0127 20:16:32.702801 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6frrg" Jan 27 20:16:32 crc kubenswrapper[4793]: I0127 20:16:32.894183 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6frrg"] Jan 27 20:16:33 crc kubenswrapper[4793]: I0127 20:16:33.094782 4793 generic.go:334] "Generic (PLEG): container finished" podID="15e32659-2b3c-4d16-8a8c-6442a3298534" containerID="e05e3e9b8749c6ffe98a73ff4c0e92d7e754f1119ddc689825a7252ee0157b8d" exitCode=0 Jan 27 20:16:33 crc kubenswrapper[4793]: I0127 20:16:33.094845 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6frrg" event={"ID":"15e32659-2b3c-4d16-8a8c-6442a3298534","Type":"ContainerDied","Data":"e05e3e9b8749c6ffe98a73ff4c0e92d7e754f1119ddc689825a7252ee0157b8d"} Jan 27 20:16:33 crc kubenswrapper[4793]: I0127 20:16:33.094881 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6frrg" event={"ID":"15e32659-2b3c-4d16-8a8c-6442a3298534","Type":"ContainerStarted","Data":"56d16036ce1b39ad0683c1e900af3ae1f8ea57721cc487c93f75a5e0cdd5cb56"} Jan 27 20:16:34 crc kubenswrapper[4793]: I0127 20:16:34.103621 4793 generic.go:334] "Generic (PLEG): container finished" podID="b691d932-0b30-4838-9566-a378435e170d" containerID="e30417b2bf2d57c240eca34d7078ff2d08b96b9f16661a421a709a31a89b7a67" exitCode=0 Jan 27 20:16:34 crc kubenswrapper[4793]: I0127 20:16:34.103667 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" event={"ID":"b691d932-0b30-4838-9566-a378435e170d","Type":"ContainerDied","Data":"e30417b2bf2d57c240eca34d7078ff2d08b96b9f16661a421a709a31a89b7a67"} Jan 27 20:16:34 crc kubenswrapper[4793]: I0127 20:16:34.107203 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6frrg" event={"ID":"15e32659-2b3c-4d16-8a8c-6442a3298534","Type":"ContainerStarted","Data":"8344962f666f71784597fbecf4c3574c7f421ab4dc8975702c37280b2a65ffdf"} Jan 27 20:16:35 crc kubenswrapper[4793]: I0127 20:16:35.115390 4793 generic.go:334] "Generic (PLEG): container finished" podID="b691d932-0b30-4838-9566-a378435e170d" containerID="c496e5f2acbf0582ea05d119d00a4bc5829a80566ec15d19f0623a7930ba5400" exitCode=0 Jan 27 20:16:35 crc kubenswrapper[4793]: I0127 20:16:35.115559 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" event={"ID":"b691d932-0b30-4838-9566-a378435e170d","Type":"ContainerDied","Data":"c496e5f2acbf0582ea05d119d00a4bc5829a80566ec15d19f0623a7930ba5400"} Jan 27 20:16:36 crc kubenswrapper[4793]: I0127 20:16:36.600628 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" Jan 27 20:16:36 crc kubenswrapper[4793]: I0127 20:16:36.692338 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4v5sw\" (UniqueName: \"kubernetes.io/projected/b691d932-0b30-4838-9566-a378435e170d-kube-api-access-4v5sw\") pod \"b691d932-0b30-4838-9566-a378435e170d\" (UID: \"b691d932-0b30-4838-9566-a378435e170d\") " Jan 27 20:16:36 crc kubenswrapper[4793]: I0127 20:16:36.692500 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b691d932-0b30-4838-9566-a378435e170d-util\") pod \"b691d932-0b30-4838-9566-a378435e170d\" (UID: \"b691d932-0b30-4838-9566-a378435e170d\") " Jan 27 20:16:36 crc kubenswrapper[4793]: I0127 20:16:36.692614 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b691d932-0b30-4838-9566-a378435e170d-bundle\") pod \"b691d932-0b30-4838-9566-a378435e170d\" (UID: \"b691d932-0b30-4838-9566-a378435e170d\") " Jan 27 20:16:36 crc kubenswrapper[4793]: I0127 20:16:36.694997 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b691d932-0b30-4838-9566-a378435e170d-bundle" (OuterVolumeSpecName: "bundle") pod "b691d932-0b30-4838-9566-a378435e170d" (UID: "b691d932-0b30-4838-9566-a378435e170d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:16:36 crc kubenswrapper[4793]: I0127 20:16:36.698185 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b691d932-0b30-4838-9566-a378435e170d-kube-api-access-4v5sw" (OuterVolumeSpecName: "kube-api-access-4v5sw") pod "b691d932-0b30-4838-9566-a378435e170d" (UID: "b691d932-0b30-4838-9566-a378435e170d"). InnerVolumeSpecName "kube-api-access-4v5sw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:16:36 crc kubenswrapper[4793]: I0127 20:16:36.702684 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b691d932-0b30-4838-9566-a378435e170d-util" (OuterVolumeSpecName: "util") pod "b691d932-0b30-4838-9566-a378435e170d" (UID: "b691d932-0b30-4838-9566-a378435e170d"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:16:36 crc kubenswrapper[4793]: I0127 20:16:36.794109 4793 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b691d932-0b30-4838-9566-a378435e170d-util\") on node \"crc\" DevicePath \"\"" Jan 27 20:16:36 crc kubenswrapper[4793]: I0127 20:16:36.794151 4793 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b691d932-0b30-4838-9566-a378435e170d-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:16:36 crc kubenswrapper[4793]: I0127 20:16:36.794173 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4v5sw\" (UniqueName: \"kubernetes.io/projected/b691d932-0b30-4838-9566-a378435e170d-kube-api-access-4v5sw\") on node \"crc\" DevicePath \"\"" Jan 27 20:16:37 crc kubenswrapper[4793]: I0127 20:16:37.129275 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" event={"ID":"b691d932-0b30-4838-9566-a378435e170d","Type":"ContainerDied","Data":"d6adb859bf2842c6c3c9a06e8141878c3b56532b5d12995b90e7f147a9bca29c"} Jan 27 20:16:37 crc kubenswrapper[4793]: I0127 20:16:37.129371 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d6adb859bf2842c6c3c9a06e8141878c3b56532b5d12995b90e7f147a9bca29c" Jan 27 20:16:37 crc kubenswrapper[4793]: I0127 20:16:37.129288 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx" Jan 27 20:16:37 crc kubenswrapper[4793]: I0127 20:16:37.131415 4793 generic.go:334] "Generic (PLEG): container finished" podID="15e32659-2b3c-4d16-8a8c-6442a3298534" containerID="8344962f666f71784597fbecf4c3574c7f421ab4dc8975702c37280b2a65ffdf" exitCode=0 Jan 27 20:16:37 crc kubenswrapper[4793]: I0127 20:16:37.131455 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6frrg" event={"ID":"15e32659-2b3c-4d16-8a8c-6442a3298534","Type":"ContainerDied","Data":"8344962f666f71784597fbecf4c3574c7f421ab4dc8975702c37280b2a65ffdf"} Jan 27 20:16:38 crc kubenswrapper[4793]: I0127 20:16:38.141653 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6frrg" event={"ID":"15e32659-2b3c-4d16-8a8c-6442a3298534","Type":"ContainerStarted","Data":"535fdc571ffdf0c308aa9e32fea3592485992d3996730826f4957e86f5f46f55"} Jan 27 20:16:38 crc kubenswrapper[4793]: I0127 20:16:38.163586 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6frrg" podStartSLOduration=1.8250578370000001 podStartE2EDuration="6.163546667s" podCreationTimestamp="2026-01-27 20:16:32 +0000 UTC" firstStartedPulling="2026-01-27 20:16:33.192411388 +0000 UTC m=+818.582664544" lastFinishedPulling="2026-01-27 20:16:37.530900218 +0000 UTC m=+822.921153374" observedRunningTime="2026-01-27 20:16:38.162277806 +0000 UTC m=+823.552530962" watchObservedRunningTime="2026-01-27 20:16:38.163546667 +0000 UTC m=+823.553799823" Jan 27 20:16:42 crc kubenswrapper[4793]: I0127 20:16:42.728670 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6frrg" Jan 27 20:16:42 crc kubenswrapper[4793]: I0127 20:16:42.731186 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6frrg" 
Jan 27 20:16:44 crc kubenswrapper[4793]: I0127 20:16:44.459778 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6frrg" podUID="15e32659-2b3c-4d16-8a8c-6442a3298534" containerName="registry-server" probeResult="failure" output=<
Jan 27 20:16:44 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s
Jan 27 20:16:44 crc kubenswrapper[4793]: >
Jan 27 20:16:50 crc kubenswrapper[4793]: I0127 20:16:50.863684 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-557c7"]
Jan 27 20:16:50 crc kubenswrapper[4793]: E0127 20:16:50.864712 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b691d932-0b30-4838-9566-a378435e170d" containerName="extract"
Jan 27 20:16:50 crc kubenswrapper[4793]: I0127 20:16:50.864748 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b691d932-0b30-4838-9566-a378435e170d" containerName="extract"
Jan 27 20:16:50 crc kubenswrapper[4793]: E0127 20:16:50.864781 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b691d932-0b30-4838-9566-a378435e170d" containerName="pull"
Jan 27 20:16:50 crc kubenswrapper[4793]: I0127 20:16:50.864795 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b691d932-0b30-4838-9566-a378435e170d" containerName="pull"
Jan 27 20:16:50 crc kubenswrapper[4793]: E0127 20:16:50.864808 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b691d932-0b30-4838-9566-a378435e170d" containerName="util"
Jan 27 20:16:50 crc kubenswrapper[4793]: I0127 20:16:50.864815 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b691d932-0b30-4838-9566-a378435e170d" containerName="util"
Jan 27 20:16:50 crc kubenswrapper[4793]: I0127 20:16:50.865002 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="b691d932-0b30-4838-9566-a378435e170d" containerName="extract"
Jan 27 20:16:50 crc kubenswrapper[4793]: I0127 20:16:50.865721 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-557c7"
Jan 27 20:16:50 crc kubenswrapper[4793]: I0127 20:16:50.869526 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt"
Jan 27 20:16:50 crc kubenswrapper[4793]: I0127 20:16:50.869767 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-c7bfj"
Jan 27 20:16:50 crc kubenswrapper[4793]: I0127 20:16:50.871751 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt"
Jan 27 20:16:50 crc kubenswrapper[4793]: I0127 20:16:50.891305 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-557c7"]
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.040416 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh"]
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.042101 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.042626 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frhjg\" (UniqueName: \"kubernetes.io/projected/d778ab64-052f-4ffb-a80d-52d1807a499c-kube-api-access-frhjg\") pod \"obo-prometheus-operator-68bc856cb9-557c7\" (UID: \"d778ab64-052f-4ffb-a80d-52d1807a499c\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-557c7"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.045338 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-wzwxc"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.047107 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.061469 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59"]
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.062519 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.075420 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh"]
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.104197 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59"]
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.144046 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frhjg\" (UniqueName: \"kubernetes.io/projected/d778ab64-052f-4ffb-a80d-52d1807a499c-kube-api-access-frhjg\") pod \"obo-prometheus-operator-68bc856cb9-557c7\" (UID: \"d778ab64-052f-4ffb-a80d-52d1807a499c\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-557c7"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.144755 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/cdf9943f-a502-4e73-bbb7-5b638de02443-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh\" (UID: \"cdf9943f-a502-4e73-bbb7-5b638de02443\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.144823 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/cdf9943f-a502-4e73-bbb7-5b638de02443-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh\" (UID: \"cdf9943f-a502-4e73-bbb7-5b638de02443\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.179727 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frhjg\" (UniqueName: \"kubernetes.io/projected/d778ab64-052f-4ffb-a80d-52d1807a499c-kube-api-access-frhjg\") pod \"obo-prometheus-operator-68bc856cb9-557c7\" (UID: \"d778ab64-052f-4ffb-a80d-52d1807a499c\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-557c7"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.199490 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-557c7"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.216629 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-ml85d"]
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.217558 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-ml85d"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.220460 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-q4qm2"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.222016 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.246318 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/cdf9943f-a502-4e73-bbb7-5b638de02443-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh\" (UID: \"cdf9943f-a502-4e73-bbb7-5b638de02443\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.246403 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/15c11c1c-e768-4e99-ac32-157dbd118043-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59\" (UID: \"15c11c1c-e768-4e99-ac32-157dbd118043\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.246457 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/15c11c1c-e768-4e99-ac32-157dbd118043-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59\" (UID: \"15c11c1c-e768-4e99-ac32-157dbd118043\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.246527 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/cdf9943f-a502-4e73-bbb7-5b638de02443-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh\" (UID: \"cdf9943f-a502-4e73-bbb7-5b638de02443\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.253054 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/cdf9943f-a502-4e73-bbb7-5b638de02443-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh\" (UID: \"cdf9943f-a502-4e73-bbb7-5b638de02443\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.256506 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/cdf9943f-a502-4e73-bbb7-5b638de02443-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh\" (UID: \"cdf9943f-a502-4e73-bbb7-5b638de02443\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.262665 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-ml85d"]
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.348198 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/b58cf118-0ae5-43f3-bf3c-f5f01eb636ba-observability-operator-tls\") pod \"observability-operator-59bdc8b94-ml85d\" (UID: \"b58cf118-0ae5-43f3-bf3c-f5f01eb636ba\") " pod="openshift-operators/observability-operator-59bdc8b94-ml85d"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.348253 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wk29l\" (UniqueName: \"kubernetes.io/projected/b58cf118-0ae5-43f3-bf3c-f5f01eb636ba-kube-api-access-wk29l\") pod \"observability-operator-59bdc8b94-ml85d\" (UID: \"b58cf118-0ae5-43f3-bf3c-f5f01eb636ba\") " pod="openshift-operators/observability-operator-59bdc8b94-ml85d"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.348361 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/15c11c1c-e768-4e99-ac32-157dbd118043-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59\" (UID: \"15c11c1c-e768-4e99-ac32-157dbd118043\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.348396 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/15c11c1c-e768-4e99-ac32-157dbd118043-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59\" (UID: \"15c11c1c-e768-4e99-ac32-157dbd118043\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.353278 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/15c11c1c-e768-4e99-ac32-157dbd118043-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59\" (UID: \"15c11c1c-e768-4e99-ac32-157dbd118043\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.353967 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/15c11c1c-e768-4e99-ac32-157dbd118043-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59\" (UID: \"15c11c1c-e768-4e99-ac32-157dbd118043\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.364441 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-x6b5g"]
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.365310 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-x6b5g"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.366163 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.372002 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-v4zhw"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.384848 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-x6b5g"]
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.411464 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.459362 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/5f176291-9b04-49fc-ad48-fe1552b2bcaf-openshift-service-ca\") pod \"perses-operator-5bf474d74f-x6b5g\" (UID: \"5f176291-9b04-49fc-ad48-fe1552b2bcaf\") " pod="openshift-operators/perses-operator-5bf474d74f-x6b5g"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.459438 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/b58cf118-0ae5-43f3-bf3c-f5f01eb636ba-observability-operator-tls\") pod \"observability-operator-59bdc8b94-ml85d\" (UID: \"b58cf118-0ae5-43f3-bf3c-f5f01eb636ba\") " pod="openshift-operators/observability-operator-59bdc8b94-ml85d"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.459458 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wk29l\" (UniqueName: \"kubernetes.io/projected/b58cf118-0ae5-43f3-bf3c-f5f01eb636ba-kube-api-access-wk29l\") pod \"observability-operator-59bdc8b94-ml85d\" (UID: \"b58cf118-0ae5-43f3-bf3c-f5f01eb636ba\") " pod="openshift-operators/observability-operator-59bdc8b94-ml85d"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.459524 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7k9g\" (UniqueName: \"kubernetes.io/projected/5f176291-9b04-49fc-ad48-fe1552b2bcaf-kube-api-access-d7k9g\") pod \"perses-operator-5bf474d74f-x6b5g\" (UID: \"5f176291-9b04-49fc-ad48-fe1552b2bcaf\") " pod="openshift-operators/perses-operator-5bf474d74f-x6b5g"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.465853 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/b58cf118-0ae5-43f3-bf3c-f5f01eb636ba-observability-operator-tls\") pod \"observability-operator-59bdc8b94-ml85d\" (UID: \"b58cf118-0ae5-43f3-bf3c-f5f01eb636ba\") " pod="openshift-operators/observability-operator-59bdc8b94-ml85d"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.569816 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/5f176291-9b04-49fc-ad48-fe1552b2bcaf-openshift-service-ca\") pod \"perses-operator-5bf474d74f-x6b5g\" (UID: \"5f176291-9b04-49fc-ad48-fe1552b2bcaf\") " pod="openshift-operators/perses-operator-5bf474d74f-x6b5g"
Jan 27 20:16:51 crc kubenswrapper[4793]: I0127
20:16:51.569919 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7k9g\" (UniqueName: \"kubernetes.io/projected/5f176291-9b04-49fc-ad48-fe1552b2bcaf-kube-api-access-d7k9g\") pod \"perses-operator-5bf474d74f-x6b5g\" (UID: \"5f176291-9b04-49fc-ad48-fe1552b2bcaf\") " pod="openshift-operators/perses-operator-5bf474d74f-x6b5g" Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.571222 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/5f176291-9b04-49fc-ad48-fe1552b2bcaf-openshift-service-ca\") pod \"perses-operator-5bf474d74f-x6b5g\" (UID: \"5f176291-9b04-49fc-ad48-fe1552b2bcaf\") " pod="openshift-operators/perses-operator-5bf474d74f-x6b5g" Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.605623 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7k9g\" (UniqueName: \"kubernetes.io/projected/5f176291-9b04-49fc-ad48-fe1552b2bcaf-kube-api-access-d7k9g\") pod \"perses-operator-5bf474d74f-x6b5g\" (UID: \"5f176291-9b04-49fc-ad48-fe1552b2bcaf\") " pod="openshift-operators/perses-operator-5bf474d74f-x6b5g" Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.606411 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wk29l\" (UniqueName: \"kubernetes.io/projected/b58cf118-0ae5-43f3-bf3c-f5f01eb636ba-kube-api-access-wk29l\") pod \"observability-operator-59bdc8b94-ml85d\" (UID: \"b58cf118-0ae5-43f3-bf3c-f5f01eb636ba\") " pod="openshift-operators/observability-operator-59bdc8b94-ml85d" Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.780079 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-x6b5g" Jan 27 20:16:51 crc kubenswrapper[4793]: I0127 20:16:51.915928 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-ml85d" Jan 27 20:16:52 crc kubenswrapper[4793]: I0127 20:16:52.004972 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-557c7"] Jan 27 20:16:52 crc kubenswrapper[4793]: I0127 20:16:52.389888 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh"] Jan 27 20:16:52 crc kubenswrapper[4793]: I0127 20:16:52.503420 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59"] Jan 27 20:16:52 crc kubenswrapper[4793]: I0127 20:16:52.753581 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:16:52 crc kubenswrapper[4793]: I0127 20:16:52.753843 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:16:52 crc kubenswrapper[4793]: I0127 20:16:52.753890 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:16:52 crc kubenswrapper[4793]: I0127 20:16:52.754475 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d44a61fca52544454e89e7b279be6491680e95bc7fe8be3fc3c65a1e94d3f817"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 20:16:52 crc kubenswrapper[4793]: I0127 20:16:52.754532 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://d44a61fca52544454e89e7b279be6491680e95bc7fe8be3fc3c65a1e94d3f817" gracePeriod=600 Jan 27 20:16:52 crc kubenswrapper[4793]: I0127 20:16:52.789029 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6frrg" Jan 27 20:16:52 crc kubenswrapper[4793]: I0127 20:16:52.818719 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-x6b5g"] Jan 27 20:16:52 crc kubenswrapper[4793]: W0127 20:16:52.846962 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f176291_9b04_49fc_ad48_fe1552b2bcaf.slice/crio-310612c0db7b273a4962bafdc0d737b5f8b57e4df9a43ecdc2293290f722253c WatchSource:0}: Error finding container 310612c0db7b273a4962bafdc0d737b5f8b57e4df9a43ecdc2293290f722253c: Status 404 returned error can't find the container with id 310612c0db7b273a4962bafdc0d737b5f8b57e4df9a43ecdc2293290f722253c Jan 27 20:16:52 crc kubenswrapper[4793]: I0127 20:16:52.911912 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/redhat-operators-6frrg" Jan 27 20:16:52 crc kubenswrapper[4793]: I0127 20:16:52.925245 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-ml85d"] Jan 27 20:16:52 crc kubenswrapper[4793]: W0127 20:16:52.946634 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb58cf118_0ae5_43f3_bf3c_f5f01eb636ba.slice/crio-ba9fabdf202790dbbc48c9c76ff1f995f4db69d5bb0eb186eecce59e7af77d20 WatchSource:0}: Error finding container ba9fabdf202790dbbc48c9c76ff1f995f4db69d5bb0eb186eecce59e7af77d20: Status 404 returned error can't find the container with id ba9fabdf202790dbbc48c9c76ff1f995f4db69d5bb0eb186eecce59e7af77d20 Jan 27 20:16:53 crc kubenswrapper[4793]: I0127 20:16:53.021163 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-557c7" event={"ID":"d778ab64-052f-4ffb-a80d-52d1807a499c","Type":"ContainerStarted","Data":"89c76ecea903767cd8ee6ddf879bfc57c08bfb99b025b295bc838adf7f951b66"} Jan 27 20:16:53 crc kubenswrapper[4793]: I0127 20:16:53.023153 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-x6b5g" event={"ID":"5f176291-9b04-49fc-ad48-fe1552b2bcaf","Type":"ContainerStarted","Data":"310612c0db7b273a4962bafdc0d737b5f8b57e4df9a43ecdc2293290f722253c"} Jan 27 20:16:53 crc kubenswrapper[4793]: I0127 20:16:53.024482 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh" event={"ID":"cdf9943f-a502-4e73-bbb7-5b638de02443","Type":"ContainerStarted","Data":"21e5e96c32b19ed999549aa864754dab0687485ecba471ee6466a6934ae9ac83"} Jan 27 20:16:53 crc kubenswrapper[4793]: I0127 20:16:53.026485 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="d44a61fca52544454e89e7b279be6491680e95bc7fe8be3fc3c65a1e94d3f817" exitCode=0 Jan 27 20:16:53 crc kubenswrapper[4793]: I0127 20:16:53.026540 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"d44a61fca52544454e89e7b279be6491680e95bc7fe8be3fc3c65a1e94d3f817"} Jan 27 20:16:53 crc kubenswrapper[4793]: I0127 20:16:53.026593 4793 scope.go:117] "RemoveContainer" containerID="a08ed91fe096dea1257ab8ed2eedc19a010c60955e37fc267fc3369890a4e5c4" Jan 27 20:16:53 crc kubenswrapper[4793]: I0127 20:16:53.027763 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-ml85d" event={"ID":"b58cf118-0ae5-43f3-bf3c-f5f01eb636ba","Type":"ContainerStarted","Data":"ba9fabdf202790dbbc48c9c76ff1f995f4db69d5bb0eb186eecce59e7af77d20"} Jan 27 20:16:53 crc kubenswrapper[4793]: I0127 20:16:53.031401 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59" event={"ID":"15c11c1c-e768-4e99-ac32-157dbd118043","Type":"ContainerStarted","Data":"0d79886b2bd6c4e367e76105c538116aba40a6d1e2f0fcfcbb794cf30f400a49"} Jan 27 20:16:53 crc kubenswrapper[4793]: I0127 20:16:53.164989 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6frrg"] Jan 27 20:16:54 crc kubenswrapper[4793]: I0127 20:16:54.125269 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"c041cd73cb3eb270167a956b00348c4d59b0c9f650876c07addb86b8623f031a"} Jan 27 20:16:54 crc kubenswrapper[4793]: I0127 20:16:54.126188 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6frrg" podUID="15e32659-2b3c-4d16-8a8c-6442a3298534" containerName="registry-server" containerID="cri-o://535fdc571ffdf0c308aa9e32fea3592485992d3996730826f4957e86f5f46f55" gracePeriod=2 Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.003218 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6frrg" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.066579 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e32659-2b3c-4d16-8a8c-6442a3298534-catalog-content\") pod \"15e32659-2b3c-4d16-8a8c-6442a3298534\" (UID: \"15e32659-2b3c-4d16-8a8c-6442a3298534\") " Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.066648 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e32659-2b3c-4d16-8a8c-6442a3298534-utilities\") pod \"15e32659-2b3c-4d16-8a8c-6442a3298534\" (UID: \"15e32659-2b3c-4d16-8a8c-6442a3298534\") " Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.066699 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vphxn\" (UniqueName: \"kubernetes.io/projected/15e32659-2b3c-4d16-8a8c-6442a3298534-kube-api-access-vphxn\") pod \"15e32659-2b3c-4d16-8a8c-6442a3298534\" (UID: \"15e32659-2b3c-4d16-8a8c-6442a3298534\") " Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.067810 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15e32659-2b3c-4d16-8a8c-6442a3298534-utilities" (OuterVolumeSpecName: "utilities") pod "15e32659-2b3c-4d16-8a8c-6442a3298534" (UID: "15e32659-2b3c-4d16-8a8c-6442a3298534"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.098777 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15e32659-2b3c-4d16-8a8c-6442a3298534-kube-api-access-vphxn" (OuterVolumeSpecName: "kube-api-access-vphxn") pod "15e32659-2b3c-4d16-8a8c-6442a3298534" (UID: "15e32659-2b3c-4d16-8a8c-6442a3298534"). InnerVolumeSpecName "kube-api-access-vphxn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.167338 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e32659-2b3c-4d16-8a8c-6442a3298534-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.167367 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vphxn\" (UniqueName: \"kubernetes.io/projected/15e32659-2b3c-4d16-8a8c-6442a3298534-kube-api-access-vphxn\") on node \"crc\" DevicePath \"\"" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.169107 4793 generic.go:334] "Generic (PLEG): container finished" podID="15e32659-2b3c-4d16-8a8c-6442a3298534" containerID="535fdc571ffdf0c308aa9e32fea3592485992d3996730826f4957e86f5f46f55" exitCode=0 Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.170722 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6frrg" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.171098 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6frrg" event={"ID":"15e32659-2b3c-4d16-8a8c-6442a3298534","Type":"ContainerDied","Data":"535fdc571ffdf0c308aa9e32fea3592485992d3996730826f4957e86f5f46f55"} Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.171132 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6frrg" event={"ID":"15e32659-2b3c-4d16-8a8c-6442a3298534","Type":"ContainerDied","Data":"56d16036ce1b39ad0683c1e900af3ae1f8ea57721cc487c93f75a5e0cdd5cb56"} Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.171153 4793 scope.go:117] "RemoveContainer" containerID="535fdc571ffdf0c308aa9e32fea3592485992d3996730826f4957e86f5f46f55" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.226244 4793 scope.go:117] "RemoveContainer" containerID="8344962f666f71784597fbecf4c3574c7f421ab4dc8975702c37280b2a65ffdf" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.241415 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15e32659-2b3c-4d16-8a8c-6442a3298534-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "15e32659-2b3c-4d16-8a8c-6442a3298534" (UID: "15e32659-2b3c-4d16-8a8c-6442a3298534"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.267455 4793 scope.go:117] "RemoveContainer" containerID="e05e3e9b8749c6ffe98a73ff4c0e92d7e754f1119ddc689825a7252ee0157b8d" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.268972 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e32659-2b3c-4d16-8a8c-6442a3298534-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.312764 4793 scope.go:117] "RemoveContainer" containerID="535fdc571ffdf0c308aa9e32fea3592485992d3996730826f4957e86f5f46f55" Jan 27 20:16:55 crc kubenswrapper[4793]: E0127 20:16:55.388557 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"535fdc571ffdf0c308aa9e32fea3592485992d3996730826f4957e86f5f46f55\": container with ID starting with 535fdc571ffdf0c308aa9e32fea3592485992d3996730826f4957e86f5f46f55 not found: ID does not exist" containerID="535fdc571ffdf0c308aa9e32fea3592485992d3996730826f4957e86f5f46f55" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.388678 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"535fdc571ffdf0c308aa9e32fea3592485992d3996730826f4957e86f5f46f55"} err="failed to get container status \"535fdc571ffdf0c308aa9e32fea3592485992d3996730826f4957e86f5f46f55\": rpc error: code = NotFound desc = could not find container \"535fdc571ffdf0c308aa9e32fea3592485992d3996730826f4957e86f5f46f55\": container with ID starting with 535fdc571ffdf0c308aa9e32fea3592485992d3996730826f4957e86f5f46f55 not found: ID does not exist" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.388767 4793 scope.go:117] "RemoveContainer" containerID="8344962f666f71784597fbecf4c3574c7f421ab4dc8975702c37280b2a65ffdf" Jan 27 20:16:55 crc kubenswrapper[4793]: E0127 20:16:55.391672 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8344962f666f71784597fbecf4c3574c7f421ab4dc8975702c37280b2a65ffdf\": container with ID starting with 8344962f666f71784597fbecf4c3574c7f421ab4dc8975702c37280b2a65ffdf not found: ID does not exist" containerID="8344962f666f71784597fbecf4c3574c7f421ab4dc8975702c37280b2a65ffdf" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.391724 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8344962f666f71784597fbecf4c3574c7f421ab4dc8975702c37280b2a65ffdf"} err="failed to get container status \"8344962f666f71784597fbecf4c3574c7f421ab4dc8975702c37280b2a65ffdf\": rpc error: code = NotFound desc = could not find container \"8344962f666f71784597fbecf4c3574c7f421ab4dc8975702c37280b2a65ffdf\": container with ID starting with 8344962f666f71784597fbecf4c3574c7f421ab4dc8975702c37280b2a65ffdf not found: ID does not exist" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.391760 4793 scope.go:117] "RemoveContainer" containerID="e05e3e9b8749c6ffe98a73ff4c0e92d7e754f1119ddc689825a7252ee0157b8d" Jan 27 20:16:55 crc kubenswrapper[4793]: E0127 20:16:55.392349 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e05e3e9b8749c6ffe98a73ff4c0e92d7e754f1119ddc689825a7252ee0157b8d\": container with ID starting with e05e3e9b8749c6ffe98a73ff4c0e92d7e754f1119ddc689825a7252ee0157b8d not found: ID does not exist" 
containerID="e05e3e9b8749c6ffe98a73ff4c0e92d7e754f1119ddc689825a7252ee0157b8d" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.392422 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e05e3e9b8749c6ffe98a73ff4c0e92d7e754f1119ddc689825a7252ee0157b8d"} err="failed to get container status \"e05e3e9b8749c6ffe98a73ff4c0e92d7e754f1119ddc689825a7252ee0157b8d\": rpc error: code = NotFound desc = could not find container \"e05e3e9b8749c6ffe98a73ff4c0e92d7e754f1119ddc689825a7252ee0157b8d\": container with ID starting with e05e3e9b8749c6ffe98a73ff4c0e92d7e754f1119ddc689825a7252ee0157b8d not found: ID does not exist" Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.518245 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6frrg"] Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.524249 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6frrg"] Jan 27 20:16:55 crc kubenswrapper[4793]: I0127 20:16:55.821521 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15e32659-2b3c-4d16-8a8c-6442a3298534" path="/var/lib/kubelet/pods/15e32659-2b3c-4d16-8a8c-6442a3298534/volumes" Jan 27 20:17:07 crc kubenswrapper[4793]: E0127 20:17:07.662284 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-rhel9-operator@sha256:e7e5f4c5e8ab0ba298ef0295a7137d438a42eb177d9322212cde6ba8f367912a" Jan 27 20:17:07 crc kubenswrapper[4793]: E0127 20:17:07.662869 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:prometheus-operator,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-rhel9-operator@sha256:e7e5f4c5e8ab0ba298ef0295a7137d438a42eb177d9322212cde6ba8f367912a,Command:[],Args:[--prometheus-config-reloader=$(RELATED_IMAGE_PROMETHEUS_CONFIG_RELOADER) --prometheus-instance-selector=app.kubernetes.io/managed-by=observability-operator --alertmanager-instance-selector=app.kubernetes.io/managed-by=observability-operator --thanos-ruler-instance-selector=app.kubernetes.io/managed-by=observability-operator --watch-referenced-objects-in-all-namespaces=true --disable-unmanaged-prometheus-configuration=true],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:GOGC,Value:30,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PROMETHEUS_CONFIG_RELOADER,Value:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:9a2097bc5b2e02bc1703f64c452ce8fe4bc6775b732db930ff4770b76ae4653a,ValueFrom:nil,},EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.1,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{157286400 0} {} 150Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-frhjg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod obo-prometheus-operator-68bc856cb9-557c7_openshift-operators(d778ab64-052f-4ffb-a80d-52d1807a499c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 27 20:17:07 crc kubenswrapper[4793]: E0127 20:17:07.663993 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-557c7" podUID="d778ab64-052f-4ffb-a80d-52d1807a499c" Jan 27 20:17:08 crc kubenswrapper[4793]: E0127 20:17:08.322671 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/obo-prometheus-rhel9-operator@sha256:e7e5f4c5e8ab0ba298ef0295a7137d438a42eb177d9322212cde6ba8f367912a\\\"\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-557c7" podUID="d778ab64-052f-4ffb-a80d-52d1807a499c" Jan 27 20:17:08 crc kubenswrapper[4793]: E0127 20:17:08.353159 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:b5c8526d2ae660fe092dd8a7acf18ec4957d5c265890a222f55396fc2cdaeed8" Jan 27 20:17:08 crc kubenswrapper[4793]: E0127 20:17:08.354224 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:perses-operator,Image:registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:b5c8526d2ae660fe092dd8a7acf18ec4957d5c265890a222f55396fc2cdaeed8,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.1,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{134217728 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:openshift-service-ca,ReadOnly:true,MountPath:/ca,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-d7k9g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000350000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod perses-operator-5bf474d74f-x6b5g_openshift-operators(5f176291-9b04-49fc-ad48-fe1552b2bcaf): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 27 20:17:08 crc kubenswrapper[4793]: E0127 20:17:08.356012 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"perses-operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/perses-operator-5bf474d74f-x6b5g" podUID="5f176291-9b04-49fc-ad48-fe1552b2bcaf" Jan 27 20:17:09 crc kubenswrapper[4793]: I0127 20:17:09.327570 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59" event={"ID":"15c11c1c-e768-4e99-ac32-157dbd118043","Type":"ContainerStarted","Data":"a140d29d8ea9be813ccb022839e5b972c1305e4361303dd626bf0785e9911931"} Jan 27 20:17:09 crc kubenswrapper[4793]: I0127 20:17:09.329338 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh" event={"ID":"cdf9943f-a502-4e73-bbb7-5b638de02443","Type":"ContainerStarted","Data":"f28f5fbfda89d88ee3baf145fc337913b7f9ec7f87228bee65135cec8b853a4e"} Jan 27 20:17:09 crc kubenswrapper[4793]: I0127 20:17:09.330695 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-ml85d" event={"ID":"b58cf118-0ae5-43f3-bf3c-f5f01eb636ba","Type":"ContainerStarted","Data":"35af3ae9326898edbdd2131122837da78a42882f0cbef008aff92ab4a117a22a"} Jan 27 20:17:09 crc kubenswrapper[4793]: I0127 20:17:09.331008 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-ml85d" Jan 27 20:17:09 crc kubenswrapper[4793]: E0127 
20:17:09.331683 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"perses-operator\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:b5c8526d2ae660fe092dd8a7acf18ec4957d5c265890a222f55396fc2cdaeed8\\\"\"" pod="openshift-operators/perses-operator-5bf474d74f-x6b5g" podUID="5f176291-9b04-49fc-ad48-fe1552b2bcaf" Jan 27 20:17:09 crc kubenswrapper[4793]: I0127 20:17:09.333065 4793 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-ml85d container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.33:8081/healthz\": dial tcp 10.217.0.33:8081: connect: connection refused" start-of-body= Jan 27 20:17:09 crc kubenswrapper[4793]: I0127 20:17:09.333115 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-59bdc8b94-ml85d" podUID="b58cf118-0ae5-43f3-bf3c-f5f01eb636ba" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.33:8081/healthz\": dial tcp 10.217.0.33:8081: connect: connection refused" Jan 27 20:17:09 crc kubenswrapper[4793]: I0127 20:17:09.343097 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59" podStartSLOduration=2.49922001 podStartE2EDuration="18.343084602s" podCreationTimestamp="2026-01-27 20:16:51 +0000 UTC" firstStartedPulling="2026-01-27 20:16:52.517419349 +0000 UTC m=+837.907672505" lastFinishedPulling="2026-01-27 20:17:08.361283941 +0000 UTC m=+853.751537097" observedRunningTime="2026-01-27 20:17:09.342515178 +0000 UTC m=+854.732768344" watchObservedRunningTime="2026-01-27 20:17:09.343084602 +0000 UTC m=+854.733337758" Jan 27 20:17:09 crc kubenswrapper[4793]: I0127 20:17:09.443258 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-59bdc8b94-ml85d" podStartSLOduration=3.007843419 podStartE2EDuration="18.443244431s" podCreationTimestamp="2026-01-27 20:16:51 +0000 UTC" firstStartedPulling="2026-01-27 20:16:52.953194976 +0000 UTC m=+838.343448132" lastFinishedPulling="2026-01-27 20:17:08.388595988 +0000 UTC m=+853.778849144" observedRunningTime="2026-01-27 20:17:09.44156647 +0000 UTC m=+854.831819636" watchObservedRunningTime="2026-01-27 20:17:09.443244431 +0000 UTC m=+854.833497587" Jan 27 20:17:09 crc kubenswrapper[4793]: I0127 20:17:09.466648 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh" podStartSLOduration=2.5355808399999997 podStartE2EDuration="18.46662111s" podCreationTimestamp="2026-01-27 20:16:51 +0000 UTC" firstStartedPulling="2026-01-27 20:16:52.427731419 +0000 UTC m=+837.817984585" lastFinishedPulling="2026-01-27 20:17:08.358771699 +0000 UTC m=+853.749024855" observedRunningTime="2026-01-27 20:17:09.464649851 +0000 UTC m=+854.854903037" watchObservedRunningTime="2026-01-27 20:17:09.46662111 +0000 UTC m=+854.856874276" Jan 27 20:17:10 crc kubenswrapper[4793]: I0127 20:17:10.337270 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-59bdc8b94-ml85d" Jan 27 20:17:24 crc kubenswrapper[4793]: I0127 20:17:24.480221 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-557c7" 
event={"ID":"d778ab64-052f-4ffb-a80d-52d1807a499c","Type":"ContainerStarted","Data":"d7f6dd086d722313a187e2ea005e00e34eef8b739eb548997068f86fdb7ae436"} Jan 27 20:17:24 crc kubenswrapper[4793]: I0127 20:17:24.498910 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-557c7" podStartSLOduration=4.717956435 podStartE2EDuration="34.498886523s" podCreationTimestamp="2026-01-27 20:16:50 +0000 UTC" firstStartedPulling="2026-01-27 20:16:52.019661519 +0000 UTC m=+837.409914675" lastFinishedPulling="2026-01-27 20:17:21.800591597 +0000 UTC m=+867.190844763" observedRunningTime="2026-01-27 20:17:24.495641653 +0000 UTC m=+869.885894879" watchObservedRunningTime="2026-01-27 20:17:24.498886523 +0000 UTC m=+869.889139679" Jan 27 20:17:25 crc kubenswrapper[4793]: I0127 20:17:25.487874 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-x6b5g" event={"ID":"5f176291-9b04-49fc-ad48-fe1552b2bcaf","Type":"ContainerStarted","Data":"77555cd4a7546d2d048ae9bde0a4c0c3fd00b7703f2dc41e1282d24abc9be05b"} Jan 27 20:17:25 crc kubenswrapper[4793]: I0127 20:17:25.489519 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5bf474d74f-x6b5g" Jan 27 20:17:31 crc kubenswrapper[4793]: I0127 20:17:31.785672 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5bf474d74f-x6b5g" Jan 27 20:17:31 crc kubenswrapper[4793]: I0127 20:17:31.827502 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5bf474d74f-x6b5g" podStartSLOduration=8.853623078 podStartE2EDuration="40.827485743s" podCreationTimestamp="2026-01-27 20:16:51 +0000 UTC" firstStartedPulling="2026-01-27 20:16:52.852507733 +0000 UTC m=+838.242760889" lastFinishedPulling="2026-01-27 20:17:24.826370398 +0000 UTC m=+870.216623554" observedRunningTime="2026-01-27 20:17:25.53590917 +0000 UTC m=+870.926162326" watchObservedRunningTime="2026-01-27 20:17:31.827485743 +0000 UTC m=+877.217738899" Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.589849 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg"] Jan 27 20:17:51 crc kubenswrapper[4793]: E0127 20:17:51.590490 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15e32659-2b3c-4d16-8a8c-6442a3298534" containerName="extract-utilities" Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.590502 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="15e32659-2b3c-4d16-8a8c-6442a3298534" containerName="extract-utilities" Jan 27 20:17:51 crc kubenswrapper[4793]: E0127 20:17:51.590512 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15e32659-2b3c-4d16-8a8c-6442a3298534" containerName="registry-server" Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.590518 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="15e32659-2b3c-4d16-8a8c-6442a3298534" containerName="registry-server" Jan 27 20:17:51 crc kubenswrapper[4793]: E0127 20:17:51.590527 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15e32659-2b3c-4d16-8a8c-6442a3298534" containerName="extract-content" Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.590532 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="15e32659-2b3c-4d16-8a8c-6442a3298534" containerName="extract-content" 
Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.590659 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="15e32659-2b3c-4d16-8a8c-6442a3298534" containerName="registry-server" Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.591391 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.592761 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.601788 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg"] Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.778970 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg\" (UID: \"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.779045 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvmlc\" (UniqueName: \"kubernetes.io/projected/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-kube-api-access-nvmlc\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg\" (UID: \"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.779210 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg\" (UID: \"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.880419 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg\" (UID: \"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.880485 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvmlc\" (UniqueName: \"kubernetes.io/projected/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-kube-api-access-nvmlc\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg\" (UID: \"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.880525 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg\" (UID: 
\"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.881063 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg\" (UID: \"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.881059 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg\" (UID: \"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.902509 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvmlc\" (UniqueName: \"kubernetes.io/projected/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-kube-api-access-nvmlc\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg\" (UID: \"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" Jan 27 20:17:51 crc kubenswrapper[4793]: I0127 20:17:51.910679 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" Jan 27 20:17:52 crc kubenswrapper[4793]: I0127 20:17:52.904841 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg"] Jan 27 20:17:53 crc kubenswrapper[4793]: I0127 20:17:53.866136 4793 generic.go:334] "Generic (PLEG): container finished" podID="1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b" containerID="21a5352aba7a9d4f246956e37927cd83961c1977716de326a48af954b5c604c6" exitCode=0 Jan 27 20:17:53 crc kubenswrapper[4793]: I0127 20:17:53.866223 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" event={"ID":"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b","Type":"ContainerDied","Data":"21a5352aba7a9d4f246956e37927cd83961c1977716de326a48af954b5c604c6"} Jan 27 20:17:53 crc kubenswrapper[4793]: I0127 20:17:53.866487 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" event={"ID":"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b","Type":"ContainerStarted","Data":"2f828499915feba1622b01a2d22cb770359039d2627e77d4ea7c9891313362d6"} Jan 27 20:17:55 crc kubenswrapper[4793]: I0127 20:17:55.885475 4793 generic.go:334] "Generic (PLEG): container finished" podID="1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b" containerID="be1d58833c85fc88274b6cf6291d9e34855f9c7855d58f7df15ead45201ad876" exitCode=0 Jan 27 20:17:55 crc kubenswrapper[4793]: I0127 20:17:55.885801 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" event={"ID":"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b","Type":"ContainerDied","Data":"be1d58833c85fc88274b6cf6291d9e34855f9c7855d58f7df15ead45201ad876"} Jan 27 20:17:56 
crc kubenswrapper[4793]: I0127 20:17:56.895730 4793 generic.go:334] "Generic (PLEG): container finished" podID="1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b" containerID="02f32fd4c1eb55740f34094115a9505a84740561be51a5da3e3492a2349537b9" exitCode=0 Jan 27 20:17:56 crc kubenswrapper[4793]: I0127 20:17:56.895779 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" event={"ID":"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b","Type":"ContainerDied","Data":"02f32fd4c1eb55740f34094115a9505a84740561be51a5da3e3492a2349537b9"} Jan 27 20:17:58 crc kubenswrapper[4793]: I0127 20:17:58.124947 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" Jan 27 20:17:58 crc kubenswrapper[4793]: I0127 20:17:58.262271 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-util\") pod \"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b\" (UID: \"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b\") " Jan 27 20:17:58 crc kubenswrapper[4793]: I0127 20:17:58.262326 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-bundle\") pod \"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b\" (UID: \"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b\") " Jan 27 20:17:58 crc kubenswrapper[4793]: I0127 20:17:58.262416 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nvmlc\" (UniqueName: \"kubernetes.io/projected/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-kube-api-access-nvmlc\") pod \"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b\" (UID: \"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b\") " Jan 27 20:17:58 crc kubenswrapper[4793]: I0127 20:17:58.263501 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-bundle" (OuterVolumeSpecName: "bundle") pod "1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b" (UID: "1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:17:58 crc kubenswrapper[4793]: I0127 20:17:58.267233 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-kube-api-access-nvmlc" (OuterVolumeSpecName: "kube-api-access-nvmlc") pod "1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b" (UID: "1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b"). InnerVolumeSpecName "kube-api-access-nvmlc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:17:58 crc kubenswrapper[4793]: I0127 20:17:58.279097 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-util" (OuterVolumeSpecName: "util") pod "1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b" (UID: "1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:17:58 crc kubenswrapper[4793]: I0127 20:17:58.364136 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nvmlc\" (UniqueName: \"kubernetes.io/projected/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-kube-api-access-nvmlc\") on node \"crc\" DevicePath \"\"" Jan 27 20:17:58 crc kubenswrapper[4793]: I0127 20:17:58.364190 4793 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-util\") on node \"crc\" DevicePath \"\"" Jan 27 20:17:58 crc kubenswrapper[4793]: I0127 20:17:58.364203 4793 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:17:58 crc kubenswrapper[4793]: I0127 20:17:58.911980 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" event={"ID":"1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b","Type":"ContainerDied","Data":"2f828499915feba1622b01a2d22cb770359039d2627e77d4ea7c9891313362d6"} Jan 27 20:17:58 crc kubenswrapper[4793]: I0127 20:17:58.912021 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f828499915feba1622b01a2d22cb770359039d2627e77d4ea7c9891313362d6" Jan 27 20:17:58 crc kubenswrapper[4793]: I0127 20:17:58.912034 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg" Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.067568 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-qmrh5"] Jan 27 20:18:00 crc kubenswrapper[4793]: E0127 20:18:00.068090 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b" containerName="pull" Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.068102 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b" containerName="pull" Jan 27 20:18:00 crc kubenswrapper[4793]: E0127 20:18:00.068115 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b" containerName="util" Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.068121 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b" containerName="util" Jan 27 20:18:00 crc kubenswrapper[4793]: E0127 20:18:00.068142 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b" containerName="extract" Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.068148 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b" containerName="extract" Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.068239 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b" containerName="extract" Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.068638 4793 util.go:30] "No sandbox for pod can be found. 
Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.068638 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-qmrh5"
Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.070489 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt"
Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.071045 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt"
Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.071179 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-rzgpw"
Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.080677 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-qmrh5"]
Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.185041 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xz79x\" (UniqueName: \"kubernetes.io/projected/042b00b6-81fb-45a3-92be-37d3666ade02-kube-api-access-xz79x\") pod \"nmstate-operator-646758c888-qmrh5\" (UID: \"042b00b6-81fb-45a3-92be-37d3666ade02\") " pod="openshift-nmstate/nmstate-operator-646758c888-qmrh5"
Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.287059 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xz79x\" (UniqueName: \"kubernetes.io/projected/042b00b6-81fb-45a3-92be-37d3666ade02-kube-api-access-xz79x\") pod \"nmstate-operator-646758c888-qmrh5\" (UID: \"042b00b6-81fb-45a3-92be-37d3666ade02\") " pod="openshift-nmstate/nmstate-operator-646758c888-qmrh5"
Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.308673 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xz79x\" (UniqueName: \"kubernetes.io/projected/042b00b6-81fb-45a3-92be-37d3666ade02-kube-api-access-xz79x\") pod \"nmstate-operator-646758c888-qmrh5\" (UID: \"042b00b6-81fb-45a3-92be-37d3666ade02\") " pod="openshift-nmstate/nmstate-operator-646758c888-qmrh5"
Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.395830 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-qmrh5"
Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.605894 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-qmrh5"]
Jan 27 20:18:00 crc kubenswrapper[4793]: W0127 20:18:00.614137 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod042b00b6_81fb_45a3_92be_37d3666ade02.slice/crio-efed15bd18ba715fa5aa321c7b95637e3b5b09cbc779fa4dfc4d1219a53322a9 WatchSource:0}: Error finding container efed15bd18ba715fa5aa321c7b95637e3b5b09cbc779fa4dfc4d1219a53322a9: Status 404 returned error can't find the container with id efed15bd18ba715fa5aa321c7b95637e3b5b09cbc779fa4dfc4d1219a53322a9
Jan 27 20:18:00 crc kubenswrapper[4793]: I0127 20:18:00.924163 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-qmrh5" event={"ID":"042b00b6-81fb-45a3-92be-37d3666ade02","Type":"ContainerStarted","Data":"efed15bd18ba715fa5aa321c7b95637e3b5b09cbc779fa4dfc4d1219a53322a9"}
Jan 27 20:18:03 crc kubenswrapper[4793]: I0127 20:18:03.944929 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-qmrh5" event={"ID":"042b00b6-81fb-45a3-92be-37d3666ade02","Type":"ContainerStarted","Data":"f3d88cc8c44668a1bf4911b5ec35acd74ecc5cfc4f9e6bc7170bc58ab3d8072e"}
Jan 27 20:18:03 crc kubenswrapper[4793]: I0127 20:18:03.962676 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-qmrh5" podStartSLOduration=1.583005656 podStartE2EDuration="3.962652233s" podCreationTimestamp="2026-01-27 20:18:00 +0000 UTC" firstStartedPulling="2026-01-27 20:18:00.616300191 +0000 UTC m=+906.006553347" lastFinishedPulling="2026-01-27 20:18:02.995946768 +0000 UTC m=+908.386199924" observedRunningTime="2026-01-27 20:18:03.960747207 +0000 UTC m=+909.351000383" watchObservedRunningTime="2026-01-27 20:18:03.962652233 +0000 UTC m=+909.352905389"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.045194 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-jvz78"]
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.046113 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-jvz78"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.048705 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-hqhj7"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.055389 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n"]
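
The "Observed pod startup duration" entry above is internally consistent: podStartSLOduration appears to be the end-to-end startup time minus the image-pull window. A quick check of that arithmetic in Go, with the values copied from the entry (the subtraction rule is inferred from the numbers, not taken from kubelet source):

package main

import (
	"fmt"
	"time"
)

// Verifies the pod_startup_latency_tracker arithmetic for
// nmstate-operator-646758c888-qmrh5:
//   SLO duration = E2E duration - (lastFinishedPulling - firstStartedPulling)
func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	firstStartedPulling := parse("2026-01-27 20:18:00.616300191 +0000 UTC")
	lastFinishedPulling := parse("2026-01-27 20:18:02.995946768 +0000 UTC")
	e2e, _ := time.ParseDuration("3.962652233s") // podStartE2EDuration

	pullWindow := lastFinishedPulling.Sub(firstStartedPulling) // 2.379646577s
	fmt.Println((e2e - pullWindow).Seconds())                  // 1.583005656 = podStartSLOduration
}
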
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.057261 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.071192 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.093789 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n"]
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.111448 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-gkmrf"]
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.112358 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-gkmrf"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.138814 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-jvz78"]
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.147731 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwsxm\" (UniqueName: \"kubernetes.io/projected/21d1a1e4-f1c8-41fd-9ac0-91ad4003215a-kube-api-access-bwsxm\") pod \"nmstate-webhook-8474b5b9d8-tqg4n\" (UID: \"21d1a1e4-f1c8-41fd-9ac0-91ad4003215a\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.147774 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/21d1a1e4-f1c8-41fd-9ac0-91ad4003215a-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-tqg4n\" (UID: \"21d1a1e4-f1c8-41fd-9ac0-91ad4003215a\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.147803 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ckkb\" (UniqueName: \"kubernetes.io/projected/84de09db-daa3-41cb-b012-c231dff18838-kube-api-access-5ckkb\") pod \"nmstate-metrics-54757c584b-jvz78\" (UID: \"84de09db-daa3-41cb-b012-c231dff18838\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-jvz78"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.201853 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k"]
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.202743 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.206326 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-jxxs6"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.206816 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.207120 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.211645 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k"]
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.249479 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7vp8\" (UniqueName: \"kubernetes.io/projected/fb3ff166-af70-4d4a-b729-6a13686fa910-kube-api-access-z7vp8\") pod \"nmstate-handler-gkmrf\" (UID: \"fb3ff166-af70-4d4a-b729-6a13686fa910\") " pod="openshift-nmstate/nmstate-handler-gkmrf"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.249580 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwsxm\" (UniqueName: \"kubernetes.io/projected/21d1a1e4-f1c8-41fd-9ac0-91ad4003215a-kube-api-access-bwsxm\") pod \"nmstate-webhook-8474b5b9d8-tqg4n\" (UID: \"21d1a1e4-f1c8-41fd-9ac0-91ad4003215a\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.249603 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/21d1a1e4-f1c8-41fd-9ac0-91ad4003215a-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-tqg4n\" (UID: \"21d1a1e4-f1c8-41fd-9ac0-91ad4003215a\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.249627 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/fb3ff166-af70-4d4a-b729-6a13686fa910-ovs-socket\") pod \"nmstate-handler-gkmrf\" (UID: \"fb3ff166-af70-4d4a-b729-6a13686fa910\") " pod="openshift-nmstate/nmstate-handler-gkmrf"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.249651 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/fb3ff166-af70-4d4a-b729-6a13686fa910-dbus-socket\") pod \"nmstate-handler-gkmrf\" (UID: \"fb3ff166-af70-4d4a-b729-6a13686fa910\") " pod="openshift-nmstate/nmstate-handler-gkmrf"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.249671 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ckkb\" (UniqueName: \"kubernetes.io/projected/84de09db-daa3-41cb-b012-c231dff18838-kube-api-access-5ckkb\") pod \"nmstate-metrics-54757c584b-jvz78\" (UID: \"84de09db-daa3-41cb-b012-c231dff18838\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-jvz78"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.249689 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/fb3ff166-af70-4d4a-b729-6a13686fa910-nmstate-lock\") pod \"nmstate-handler-gkmrf\" (UID: \"fb3ff166-af70-4d4a-b729-6a13686fa910\") " pod="openshift-nmstate/nmstate-handler-gkmrf"
Jan 27 20:18:05 crc kubenswrapper[4793]: E0127 20:18:05.249844 4793 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found
Jan 27 20:18:05 crc kubenswrapper[4793]: E0127 20:18:05.249890 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/21d1a1e4-f1c8-41fd-9ac0-91ad4003215a-tls-key-pair podName:21d1a1e4-f1c8-41fd-9ac0-91ad4003215a nodeName:}" failed. No retries permitted until 2026-01-27 20:18:05.749873359 +0000 UTC m=+911.140126515 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/21d1a1e4-f1c8-41fd-9ac0-91ad4003215a-tls-key-pair") pod "nmstate-webhook-8474b5b9d8-tqg4n" (UID: "21d1a1e4-f1c8-41fd-9ac0-91ad4003215a") : secret "openshift-nmstate-webhook" not found
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.269709 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwsxm\" (UniqueName: \"kubernetes.io/projected/21d1a1e4-f1c8-41fd-9ac0-91ad4003215a-kube-api-access-bwsxm\") pod \"nmstate-webhook-8474b5b9d8-tqg4n\" (UID: \"21d1a1e4-f1c8-41fd-9ac0-91ad4003215a\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.270476 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ckkb\" (UniqueName: \"kubernetes.io/projected/84de09db-daa3-41cb-b012-c231dff18838-kube-api-access-5ckkb\") pod \"nmstate-metrics-54757c584b-jvz78\" (UID: \"84de09db-daa3-41cb-b012-c231dff18838\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-jvz78"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.350934 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/f65ddc46-2da9-403d-bdd9-f9c5825fca15-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-mrr9k\" (UID: \"f65ddc46-2da9-403d-bdd9-f9c5825fca15\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.350980 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/f65ddc46-2da9-403d-bdd9-f9c5825fca15-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-mrr9k\" (UID: \"f65ddc46-2da9-403d-bdd9-f9c5825fca15\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.351008 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7vp8\" (UniqueName: \"kubernetes.io/projected/fb3ff166-af70-4d4a-b729-6a13686fa910-kube-api-access-z7vp8\") pod \"nmstate-handler-gkmrf\" (UID: \"fb3ff166-af70-4d4a-b729-6a13686fa910\") " pod="openshift-nmstate/nmstate-handler-gkmrf"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.351075 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppw4t\" (UniqueName: \"kubernetes.io/projected/f65ddc46-2da9-403d-bdd9-f9c5825fca15-kube-api-access-ppw4t\") pod \"nmstate-console-plugin-7754f76f8b-mrr9k\" (UID: \"f65ddc46-2da9-403d-bdd9-f9c5825fca15\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.351172 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/fb3ff166-af70-4d4a-b729-6a13686fa910-ovs-socket\") pod \"nmstate-handler-gkmrf\" (UID: \"fb3ff166-af70-4d4a-b729-6a13686fa910\") " pod="openshift-nmstate/nmstate-handler-gkmrf"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.351207 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/fb3ff166-af70-4d4a-b729-6a13686fa910-dbus-socket\") pod \"nmstate-handler-gkmrf\" (UID: \"fb3ff166-af70-4d4a-b729-6a13686fa910\") " pod="openshift-nmstate/nmstate-handler-gkmrf"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.351252 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/fb3ff166-af70-4d4a-b729-6a13686fa910-nmstate-lock\") pod \"nmstate-handler-gkmrf\" (UID: \"fb3ff166-af70-4d4a-b729-6a13686fa910\") " pod="openshift-nmstate/nmstate-handler-gkmrf"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.351277 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/fb3ff166-af70-4d4a-b729-6a13686fa910-ovs-socket\") pod \"nmstate-handler-gkmrf\" (UID: \"fb3ff166-af70-4d4a-b729-6a13686fa910\") " pod="openshift-nmstate/nmstate-handler-gkmrf"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.351313 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/fb3ff166-af70-4d4a-b729-6a13686fa910-nmstate-lock\") pod \"nmstate-handler-gkmrf\" (UID: \"fb3ff166-af70-4d4a-b729-6a13686fa910\") " pod="openshift-nmstate/nmstate-handler-gkmrf"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.351478 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/fb3ff166-af70-4d4a-b729-6a13686fa910-dbus-socket\") pod \"nmstate-handler-gkmrf\" (UID: \"fb3ff166-af70-4d4a-b729-6a13686fa910\") " pod="openshift-nmstate/nmstate-handler-gkmrf"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.369191 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7vp8\" (UniqueName: \"kubernetes.io/projected/fb3ff166-af70-4d4a-b729-6a13686fa910-kube-api-access-z7vp8\") pod \"nmstate-handler-gkmrf\" (UID: \"fb3ff166-af70-4d4a-b729-6a13686fa910\") " pod="openshift-nmstate/nmstate-handler-gkmrf"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.396632 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-dc6769df5-97xnd"]
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.397499 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-dc6769df5-97xnd"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.401055 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-jvz78"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.409595 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-dc6769df5-97xnd"]
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.426208 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-gkmrf"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.452153 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/f65ddc46-2da9-403d-bdd9-f9c5825fca15-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-mrr9k\" (UID: \"f65ddc46-2da9-403d-bdd9-f9c5825fca15\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.452192 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/f65ddc46-2da9-403d-bdd9-f9c5825fca15-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-mrr9k\" (UID: \"f65ddc46-2da9-403d-bdd9-f9c5825fca15\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.452214 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppw4t\" (UniqueName: \"kubernetes.io/projected/f65ddc46-2da9-403d-bdd9-f9c5825fca15-kube-api-access-ppw4t\") pod \"nmstate-console-plugin-7754f76f8b-mrr9k\" (UID: \"f65ddc46-2da9-403d-bdd9-f9c5825fca15\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.453947 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/f65ddc46-2da9-403d-bdd9-f9c5825fca15-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-mrr9k\" (UID: \"f65ddc46-2da9-403d-bdd9-f9c5825fca15\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.478123 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppw4t\" (UniqueName: \"kubernetes.io/projected/f65ddc46-2da9-403d-bdd9-f9c5825fca15-kube-api-access-ppw4t\") pod \"nmstate-console-plugin-7754f76f8b-mrr9k\" (UID: \"f65ddc46-2da9-403d-bdd9-f9c5825fca15\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.563200 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f891c7d9-14aa-499c-9453-6b65bfb01f4a-oauth-serving-cert\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.563281 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f891c7d9-14aa-499c-9453-6b65bfb01f4a-service-ca\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.563313 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f891c7d9-14aa-499c-9453-6b65bfb01f4a-console-oauth-config\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.563345 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f891c7d9-14aa-499c-9453-6b65bfb01f4a-console-serving-cert\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.563367 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f891c7d9-14aa-499c-9453-6b65bfb01f4a-console-config\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.563387 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h76r4\" (UniqueName: \"kubernetes.io/projected/f891c7d9-14aa-499c-9453-6b65bfb01f4a-kube-api-access-h76r4\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.563427 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f891c7d9-14aa-499c-9453-6b65bfb01f4a-trusted-ca-bundle\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.565342 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/f65ddc46-2da9-403d-bdd9-f9c5825fca15-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-mrr9k\" (UID: \"f65ddc46-2da9-403d-bdd9-f9c5825fca15\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k"
Jan 27 20:18:05 crc kubenswrapper[4793]: W0127 20:18:05.590284 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb3ff166_af70_4d4a_b729_6a13686fa910.slice/crio-2deca2ffaf1ca20197109ffc61ecfd75a5eb63273e485edc48fdf3bb36bf713b WatchSource:0}: Error finding container 2deca2ffaf1ca20197109ffc61ecfd75a5eb63273e485edc48fdf3bb36bf713b: Status 404 returned error can't find the container with id 2deca2ffaf1ca20197109ffc61ecfd75a5eb63273e485edc48fdf3bb36bf713b
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.664944 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f891c7d9-14aa-499c-9453-6b65bfb01f4a-console-oauth-config\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd"
Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.664996 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f891c7d9-14aa-499c-9453-6b65bfb01f4a-console-serving-cert\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd"
\"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd" Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.665051 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h76r4\" (UniqueName: \"kubernetes.io/projected/f891c7d9-14aa-499c-9453-6b65bfb01f4a-kube-api-access-h76r4\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd" Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.665102 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f891c7d9-14aa-499c-9453-6b65bfb01f4a-trusted-ca-bundle\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd" Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.665180 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f891c7d9-14aa-499c-9453-6b65bfb01f4a-oauth-serving-cert\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd" Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.665221 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f891c7d9-14aa-499c-9453-6b65bfb01f4a-service-ca\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd" Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.671669 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f891c7d9-14aa-499c-9453-6b65bfb01f4a-console-config\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd" Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.674150 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f891c7d9-14aa-499c-9453-6b65bfb01f4a-oauth-serving-cert\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd" Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.674308 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f891c7d9-14aa-499c-9453-6b65bfb01f4a-trusted-ca-bundle\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd" Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.675234 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f891c7d9-14aa-499c-9453-6b65bfb01f4a-service-ca\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd" Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.677737 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f891c7d9-14aa-499c-9453-6b65bfb01f4a-console-oauth-config\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " 
pod="openshift-console/console-dc6769df5-97xnd" Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.677823 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f891c7d9-14aa-499c-9453-6b65bfb01f4a-console-serving-cert\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd" Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.683910 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h76r4\" (UniqueName: \"kubernetes.io/projected/f891c7d9-14aa-499c-9453-6b65bfb01f4a-kube-api-access-h76r4\") pod \"console-dc6769df5-97xnd\" (UID: \"f891c7d9-14aa-499c-9453-6b65bfb01f4a\") " pod="openshift-console/console-dc6769df5-97xnd" Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.785427 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/21d1a1e4-f1c8-41fd-9ac0-91ad4003215a-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-tqg4n\" (UID: \"21d1a1e4-f1c8-41fd-9ac0-91ad4003215a\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n" Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.785664 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-dc6769df5-97xnd" Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.793482 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/21d1a1e4-f1c8-41fd-9ac0-91ad4003215a-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-tqg4n\" (UID: \"21d1a1e4-f1c8-41fd-9ac0-91ad4003215a\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n" Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.816043 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-jvz78"] Jan 27 20:18:05 crc kubenswrapper[4793]: W0127 20:18:05.817261 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod84de09db_daa3_41cb_b012_c231dff18838.slice/crio-a576f1e9a2ac4d0e7c96a4fc24f9f8a57cfc1823ce438822e6658d17cc15a391 WatchSource:0}: Error finding container a576f1e9a2ac4d0e7c96a4fc24f9f8a57cfc1823ce438822e6658d17cc15a391: Status 404 returned error can't find the container with id a576f1e9a2ac4d0e7c96a4fc24f9f8a57cfc1823ce438822e6658d17cc15a391 Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.819043 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k" Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.959830 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-jvz78" event={"ID":"84de09db-daa3-41cb-b012-c231dff18838","Type":"ContainerStarted","Data":"a576f1e9a2ac4d0e7c96a4fc24f9f8a57cfc1823ce438822e6658d17cc15a391"} Jan 27 20:18:05 crc kubenswrapper[4793]: I0127 20:18:05.962044 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-gkmrf" event={"ID":"fb3ff166-af70-4d4a-b729-6a13686fa910","Type":"ContainerStarted","Data":"2deca2ffaf1ca20197109ffc61ecfd75a5eb63273e485edc48fdf3bb36bf713b"} Jan 27 20:18:06 crc kubenswrapper[4793]: I0127 20:18:06.005040 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-dc6769df5-97xnd"] Jan 27 20:18:06 crc kubenswrapper[4793]: I0127 20:18:06.007918 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n" Jan 27 20:18:06 crc kubenswrapper[4793]: I0127 20:18:06.073689 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k"] Jan 27 20:18:06 crc kubenswrapper[4793]: W0127 20:18:06.078101 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf65ddc46_2da9_403d_bdd9_f9c5825fca15.slice/crio-6d8a3bdcd8e57972d88b1b1a390ad1fd2cfc57bda04a08a076c59a992cbe0344 WatchSource:0}: Error finding container 6d8a3bdcd8e57972d88b1b1a390ad1fd2cfc57bda04a08a076c59a992cbe0344: Status 404 returned error can't find the container with id 6d8a3bdcd8e57972d88b1b1a390ad1fd2cfc57bda04a08a076c59a992cbe0344 Jan 27 20:18:06 crc kubenswrapper[4793]: I0127 20:18:06.415492 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n"] Jan 27 20:18:06 crc kubenswrapper[4793]: W0127 20:18:06.418719 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod21d1a1e4_f1c8_41fd_9ac0_91ad4003215a.slice/crio-0385549292e286304f865e061f68987bffa5053cfe3943b6934bea29a7e53622 WatchSource:0}: Error finding container 0385549292e286304f865e061f68987bffa5053cfe3943b6934bea29a7e53622: Status 404 returned error can't find the container with id 0385549292e286304f865e061f68987bffa5053cfe3943b6934bea29a7e53622 Jan 27 20:18:06 crc kubenswrapper[4793]: I0127 20:18:06.969029 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n" event={"ID":"21d1a1e4-f1c8-41fd-9ac0-91ad4003215a","Type":"ContainerStarted","Data":"0385549292e286304f865e061f68987bffa5053cfe3943b6934bea29a7e53622"} Jan 27 20:18:06 crc kubenswrapper[4793]: I0127 20:18:06.970477 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k" event={"ID":"f65ddc46-2da9-403d-bdd9-f9c5825fca15","Type":"ContainerStarted","Data":"6d8a3bdcd8e57972d88b1b1a390ad1fd2cfc57bda04a08a076c59a992cbe0344"} Jan 27 20:18:06 crc kubenswrapper[4793]: I0127 20:18:06.972255 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-dc6769df5-97xnd" event={"ID":"f891c7d9-14aa-499c-9453-6b65bfb01f4a","Type":"ContainerStarted","Data":"c70314b4d6e64cdbc6121466cf6c78df6295301209e8cb39c39d68a82b67ef10"} Jan 27 20:18:06 crc kubenswrapper[4793]: 
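
Each "SyncLoop (PLEG): event for pod" line above carries a JSON payload: the pod UID, an event type such as ContainerStarted or ContainerDied, and in Data a container or sandbox ID; note that the first ContainerStarted for each pod reports the sandbox ID that also appeared in the cgroup-watch warnings. A decoding sketch, assuming the payload is plain JSON exactly as printed:

package main

import (
	"encoding/json"
	"fmt"
)

// PodLifecycleEvent mirrors the event={...} payload printed by the
// SyncLoop (PLEG) lines: pod UID, event type, and container/sandbox ID.
type PodLifecycleEvent struct {
	ID   string `json:"ID"`
	Type string `json:"Type"`
	Data string `json:"Data"`
}

func main() {
	raw := `{"ID":"84de09db-daa3-41cb-b012-c231dff18838","Type":"ContainerStarted","Data":"a576f1e9a2ac4d0e7c96a4fc24f9f8a57cfc1823ce438822e6658d17cc15a391"}`
	var ev PodLifecycleEvent
	if err := json.Unmarshal([]byte(raw), &ev); err != nil {
		panic(err)
	}
	switch ev.Type {
	case "ContainerStarted":
		fmt.Printf("pod %s: container/sandbox %s started\n", ev.ID, ev.Data)
	case "ContainerDied":
		fmt.Printf("pod %s: container %s died\n", ev.ID, ev.Data)
	}
}
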
Jan 27 20:18:06 crc kubenswrapper[4793]: I0127 20:18:06.972308 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-dc6769df5-97xnd" event={"ID":"f891c7d9-14aa-499c-9453-6b65bfb01f4a","Type":"ContainerStarted","Data":"65281ea38cb4dd9fd76dd946ba5c1e0a4f70b320877ab0633489958b45dec29a"}
Jan 27 20:18:09 crc kubenswrapper[4793]: I0127 20:18:09.996174 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-gkmrf" event={"ID":"fb3ff166-af70-4d4a-b729-6a13686fa910","Type":"ContainerStarted","Data":"d702fc6f853c24ace94defe374b2da1167fa9945493c1422c8c81af441a0c794"}
Jan 27 20:18:09 crc kubenswrapper[4793]: I0127 20:18:09.996735 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-gkmrf"
Jan 27 20:18:09 crc kubenswrapper[4793]: I0127 20:18:09.997851 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-jvz78" event={"ID":"84de09db-daa3-41cb-b012-c231dff18838","Type":"ContainerStarted","Data":"dc0a44096b988e5a3e7d36bab5b2e41b0fc3a512e35371680b7432a50de5d4a1"}
Jan 27 20:18:09 crc kubenswrapper[4793]: I0127 20:18:09.999117 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n" event={"ID":"21d1a1e4-f1c8-41fd-9ac0-91ad4003215a","Type":"ContainerStarted","Data":"5205255c20df5c3c7164cc311a9791b04a9fc0f4395527c1607edcc4edca0e0d"}
Jan 27 20:18:09 crc kubenswrapper[4793]: I0127 20:18:09.999579 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n"
Jan 27 20:18:10 crc kubenswrapper[4793]: I0127 20:18:10.001214 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k" event={"ID":"f65ddc46-2da9-403d-bdd9-f9c5825fca15","Type":"ContainerStarted","Data":"89e6004b0aa71cac7a9a44b83554ffdbbcbc9f263678ff528b017dda1f5a998b"}
Jan 27 20:18:10 crc kubenswrapper[4793]: I0127 20:18:10.016028 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-dc6769df5-97xnd" podStartSLOduration=5.016009643 podStartE2EDuration="5.016009643s" podCreationTimestamp="2026-01-27 20:18:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:18:06.990941632 +0000 UTC m=+912.381194808" watchObservedRunningTime="2026-01-27 20:18:10.016009643 +0000 UTC m=+915.406262799"
Jan 27 20:18:10 crc kubenswrapper[4793]: I0127 20:18:10.018207 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-gkmrf" podStartSLOduration=1.382492449 podStartE2EDuration="5.018198707s" podCreationTimestamp="2026-01-27 20:18:05 +0000 UTC" firstStartedPulling="2026-01-27 20:18:05.592977408 +0000 UTC m=+910.983230564" lastFinishedPulling="2026-01-27 20:18:09.228683666 +0000 UTC m=+914.618936822" observedRunningTime="2026-01-27 20:18:10.012294442 +0000 UTC m=+915.402547598" watchObservedRunningTime="2026-01-27 20:18:10.018198707 +0000 UTC m=+915.408451863"
Jan 27 20:18:10 crc kubenswrapper[4793]: I0127 20:18:10.028950 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrr9k" podStartSLOduration=1.881020274 podStartE2EDuration="5.028932431s" podCreationTimestamp="2026-01-27 20:18:05 +0000 UTC" firstStartedPulling="2026-01-27 20:18:06.08079001 +0000 UTC m=+911.471043166" lastFinishedPulling="2026-01-27 20:18:09.228702147 +0000 UTC m=+914.618955323" observedRunningTime="2026-01-27 20:18:10.027395424 +0000 UTC m=+915.417648590" watchObservedRunningTime="2026-01-27 20:18:10.028932431 +0000 UTC m=+915.419185587"
Jan 27 20:18:10 crc kubenswrapper[4793]: I0127 20:18:10.052650 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n" podStartSLOduration=2.242365664 podStartE2EDuration="5.052610235s" podCreationTimestamp="2026-01-27 20:18:05 +0000 UTC" firstStartedPulling="2026-01-27 20:18:06.421179172 +0000 UTC m=+911.811432328" lastFinishedPulling="2026-01-27 20:18:09.231423743 +0000 UTC m=+914.621676899" observedRunningTime="2026-01-27 20:18:10.050116043 +0000 UTC m=+915.440369219" watchObservedRunningTime="2026-01-27 20:18:10.052610235 +0000 UTC m=+915.442863401"
Jan 27 20:18:12 crc kubenswrapper[4793]: I0127 20:18:12.012990 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-jvz78" event={"ID":"84de09db-daa3-41cb-b012-c231dff18838","Type":"ContainerStarted","Data":"ccaa9eaa80c97a0fea36b497f4c1438569c322295bca90f81c85c9d5cfcc3c32"}
Jan 27 20:18:12 crc kubenswrapper[4793]: I0127 20:18:12.035753 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-jvz78" podStartSLOduration=1.4304575800000001 podStartE2EDuration="7.035708327s" podCreationTimestamp="2026-01-27 20:18:05 +0000 UTC" firstStartedPulling="2026-01-27 20:18:05.81965994 +0000 UTC m=+911.209913096" lastFinishedPulling="2026-01-27 20:18:11.424910687 +0000 UTC m=+916.815163843" observedRunningTime="2026-01-27 20:18:12.028280555 +0000 UTC m=+917.418533741" watchObservedRunningTime="2026-01-27 20:18:12.035708327 +0000 UTC m=+917.425961483"
Jan 27 20:18:15 crc kubenswrapper[4793]: I0127 20:18:15.450696 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-gkmrf"
Jan 27 20:18:15 crc kubenswrapper[4793]: I0127 20:18:15.786673 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-dc6769df5-97xnd"
Jan 27 20:18:15 crc kubenswrapper[4793]: I0127 20:18:15.786744 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-dc6769df5-97xnd"
Jan 27 20:18:15 crc kubenswrapper[4793]: I0127 20:18:15.792782 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-dc6769df5-97xnd"
Jan 27 20:18:16 crc kubenswrapper[4793]: I0127 20:18:16.039110 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-dc6769df5-97xnd"
Jan 27 20:18:16 crc kubenswrapper[4793]: I0127 20:18:16.091101 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-slbcq"]
Jan 27 20:18:17 crc kubenswrapper[4793]: I0127 20:18:17.562995 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mnmpp"]
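
The timestamps in these tracker entries carry two clocks: the wall-clock reading and, after m=+, Go's monotonic reading in seconds since process start. Differencing the m= offsets gives elapsed time that is immune to wall-clock steps; for nmstate-handler-gkmrf above, 914.618936822 - 910.983230564 is about 3.635706s of image pulling, which matches podStartE2EDuration minus podStartSLOduration. A parsing sketch, with the regexp assumed from the format seen here:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Go prints time.Time values with a trailing m=+<seconds>: the
// monotonic clock reading since process start.
var monoRe = regexp.MustCompile(`m=\+([0-9.]+)`)

func mono(field string) float64 {
	m := monoRe.FindStringSubmatch(field)
	if m == nil {
		return 0
	}
	v, _ := strconv.ParseFloat(m[1], 64)
	return v
}

func main() {
	// Values from the nmstate-handler-gkmrf entry above.
	first := `firstStartedPulling="2026-01-27 20:18:05.592977408 +0000 UTC m=+910.983230564"`
	last := `lastFinishedPulling="2026-01-27 20:18:09.228683666 +0000 UTC m=+914.618936822"`
	fmt.Printf("pull window: %.6fs\n", mono(last)-mono(first)) // ~3.635706s
}
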
Jan 27 20:18:17 crc kubenswrapper[4793]: I0127 20:18:17.564820 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mnmpp"
Jan 27 20:18:17 crc kubenswrapper[4793]: I0127 20:18:17.577893 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mnmpp"]
Jan 27 20:18:17 crc kubenswrapper[4793]: I0127 20:18:17.677821 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bc6931d-7a9c-4a96-961c-9d44981973f6-utilities\") pod \"community-operators-mnmpp\" (UID: \"6bc6931d-7a9c-4a96-961c-9d44981973f6\") " pod="openshift-marketplace/community-operators-mnmpp"
Jan 27 20:18:17 crc kubenswrapper[4793]: I0127 20:18:17.677899 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vh5fb\" (UniqueName: \"kubernetes.io/projected/6bc6931d-7a9c-4a96-961c-9d44981973f6-kube-api-access-vh5fb\") pod \"community-operators-mnmpp\" (UID: \"6bc6931d-7a9c-4a96-961c-9d44981973f6\") " pod="openshift-marketplace/community-operators-mnmpp"
Jan 27 20:18:17 crc kubenswrapper[4793]: I0127 20:18:17.677939 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bc6931d-7a9c-4a96-961c-9d44981973f6-catalog-content\") pod \"community-operators-mnmpp\" (UID: \"6bc6931d-7a9c-4a96-961c-9d44981973f6\") " pod="openshift-marketplace/community-operators-mnmpp"
Jan 27 20:18:17 crc kubenswrapper[4793]: I0127 20:18:17.779405 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vh5fb\" (UniqueName: \"kubernetes.io/projected/6bc6931d-7a9c-4a96-961c-9d44981973f6-kube-api-access-vh5fb\") pod \"community-operators-mnmpp\" (UID: \"6bc6931d-7a9c-4a96-961c-9d44981973f6\") " pod="openshift-marketplace/community-operators-mnmpp"
Jan 27 20:18:17 crc kubenswrapper[4793]: I0127 20:18:17.779465 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bc6931d-7a9c-4a96-961c-9d44981973f6-catalog-content\") pod \"community-operators-mnmpp\" (UID: \"6bc6931d-7a9c-4a96-961c-9d44981973f6\") " pod="openshift-marketplace/community-operators-mnmpp"
Jan 27 20:18:17 crc kubenswrapper[4793]: I0127 20:18:17.779570 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bc6931d-7a9c-4a96-961c-9d44981973f6-utilities\") pod \"community-operators-mnmpp\" (UID: \"6bc6931d-7a9c-4a96-961c-9d44981973f6\") " pod="openshift-marketplace/community-operators-mnmpp"
Jan 27 20:18:17 crc kubenswrapper[4793]: I0127 20:18:17.780321 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bc6931d-7a9c-4a96-961c-9d44981973f6-utilities\") pod \"community-operators-mnmpp\" (UID: \"6bc6931d-7a9c-4a96-961c-9d44981973f6\") " pod="openshift-marketplace/community-operators-mnmpp"
Jan 27 20:18:17 crc kubenswrapper[4793]: I0127 20:18:17.780465 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bc6931d-7a9c-4a96-961c-9d44981973f6-catalog-content\") pod \"community-operators-mnmpp\" (UID: \"6bc6931d-7a9c-4a96-961c-9d44981973f6\") " pod="openshift-marketplace/community-operators-mnmpp"
Jan 27 20:18:17 crc kubenswrapper[4793]: I0127 20:18:17.803296 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vh5fb\" (UniqueName: \"kubernetes.io/projected/6bc6931d-7a9c-4a96-961c-9d44981973f6-kube-api-access-vh5fb\") pod \"community-operators-mnmpp\" (UID: \"6bc6931d-7a9c-4a96-961c-9d44981973f6\") " pod="openshift-marketplace/community-operators-mnmpp"
Jan 27 20:18:17 crc kubenswrapper[4793]: I0127 20:18:17.884047 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mnmpp"
Jan 27 20:18:18 crc kubenswrapper[4793]: I0127 20:18:18.685078 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mnmpp"]
Jan 27 20:18:19 crc kubenswrapper[4793]: I0127 20:18:19.101303 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mnmpp" event={"ID":"6bc6931d-7a9c-4a96-961c-9d44981973f6","Type":"ContainerStarted","Data":"8efff6f8b80a718a0f2457018d0b3fbf631588f855a0cb4db8e24ab0d650fc15"}
Jan 27 20:18:20 crc kubenswrapper[4793]: I0127 20:18:20.109384 4793 generic.go:334] "Generic (PLEG): container finished" podID="6bc6931d-7a9c-4a96-961c-9d44981973f6" containerID="513a6a84bd4489984af298f7b77f608b1e2c58f21369123119ee4655700573a0" exitCode=0
Jan 27 20:18:20 crc kubenswrapper[4793]: I0127 20:18:20.109505 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mnmpp" event={"ID":"6bc6931d-7a9c-4a96-961c-9d44981973f6","Type":"ContainerDied","Data":"513a6a84bd4489984af298f7b77f608b1e2c58f21369123119ee4655700573a0"}
Jan 27 20:18:22 crc kubenswrapper[4793]: I0127 20:18:22.127937 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mnmpp" event={"ID":"6bc6931d-7a9c-4a96-961c-9d44981973f6","Type":"ContainerStarted","Data":"2e5be832fb8d799555e1011d6961361218a75c98c2f74a1860b28e67f83c68dc"}
Jan 27 20:18:23 crc kubenswrapper[4793]: I0127 20:18:23.134868 4793 generic.go:334] "Generic (PLEG): container finished" podID="6bc6931d-7a9c-4a96-961c-9d44981973f6" containerID="2e5be832fb8d799555e1011d6961361218a75c98c2f74a1860b28e67f83c68dc" exitCode=0
Jan 27 20:18:23 crc kubenswrapper[4793]: I0127 20:18:23.134916 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mnmpp" event={"ID":"6bc6931d-7a9c-4a96-961c-9d44981973f6","Type":"ContainerDied","Data":"2e5be832fb8d799555e1011d6961361218a75c98c2f74a1860b28e67f83c68dc"}
Jan 27 20:18:24 crc kubenswrapper[4793]: I0127 20:18:24.139237 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-l5gn2"]
Jan 27 20:18:24 crc kubenswrapper[4793]: I0127 20:18:24.141470 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l5gn2"
Jan 27 20:18:24 crc kubenswrapper[4793]: I0127 20:18:24.153479 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l5gn2"]
Jan 27 20:18:24 crc kubenswrapper[4793]: I0127 20:18:24.154676 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mnmpp" event={"ID":"6bc6931d-7a9c-4a96-961c-9d44981973f6","Type":"ContainerStarted","Data":"d40aa463e9f84432d3ce4b66803e9f828063d82f16715a82722531324823b443"}
Jan 27 20:18:24 crc kubenswrapper[4793]: I0127 20:18:24.198445 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mnmpp" podStartSLOduration=3.772135109 podStartE2EDuration="7.19842526s" podCreationTimestamp="2026-01-27 20:18:17 +0000 UTC" firstStartedPulling="2026-01-27 20:18:20.113038989 +0000 UTC m=+925.503292145" lastFinishedPulling="2026-01-27 20:18:23.53932914 +0000 UTC m=+928.929582296" observedRunningTime="2026-01-27 20:18:24.196363949 +0000 UTC m=+929.586617135" watchObservedRunningTime="2026-01-27 20:18:24.19842526 +0000 UTC m=+929.588678416"
Jan 27 20:18:24 crc kubenswrapper[4793]: I0127 20:18:24.232422 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bb8eb0c-813f-441b-807f-98d0e8be101b-catalog-content\") pod \"redhat-marketplace-l5gn2\" (UID: \"9bb8eb0c-813f-441b-807f-98d0e8be101b\") " pod="openshift-marketplace/redhat-marketplace-l5gn2"
Jan 27 20:18:24 crc kubenswrapper[4793]: I0127 20:18:24.232492 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpbwd\" (UniqueName: \"kubernetes.io/projected/9bb8eb0c-813f-441b-807f-98d0e8be101b-kube-api-access-fpbwd\") pod \"redhat-marketplace-l5gn2\" (UID: \"9bb8eb0c-813f-441b-807f-98d0e8be101b\") " pod="openshift-marketplace/redhat-marketplace-l5gn2"
Jan 27 20:18:24 crc kubenswrapper[4793]: I0127 20:18:24.232530 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bb8eb0c-813f-441b-807f-98d0e8be101b-utilities\") pod \"redhat-marketplace-l5gn2\" (UID: \"9bb8eb0c-813f-441b-807f-98d0e8be101b\") " pod="openshift-marketplace/redhat-marketplace-l5gn2"
Jan 27 20:18:24 crc kubenswrapper[4793]: I0127 20:18:24.334338 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bb8eb0c-813f-441b-807f-98d0e8be101b-utilities\") pod \"redhat-marketplace-l5gn2\" (UID: \"9bb8eb0c-813f-441b-807f-98d0e8be101b\") " pod="openshift-marketplace/redhat-marketplace-l5gn2"
Jan 27 20:18:24 crc kubenswrapper[4793]: I0127 20:18:24.334841 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bb8eb0c-813f-441b-807f-98d0e8be101b-catalog-content\") pod \"redhat-marketplace-l5gn2\" (UID: \"9bb8eb0c-813f-441b-807f-98d0e8be101b\") " pod="openshift-marketplace/redhat-marketplace-l5gn2"
Jan 27 20:18:24 crc kubenswrapper[4793]: I0127 20:18:24.334890 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpbwd\" (UniqueName: \"kubernetes.io/projected/9bb8eb0c-813f-441b-807f-98d0e8be101b-kube-api-access-fpbwd\") pod \"redhat-marketplace-l5gn2\" (UID: \"9bb8eb0c-813f-441b-807f-98d0e8be101b\") " pod="openshift-marketplace/redhat-marketplace-l5gn2"
Jan 27 20:18:24 crc kubenswrapper[4793]: I0127 20:18:24.335035 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bb8eb0c-813f-441b-807f-98d0e8be101b-utilities\") pod \"redhat-marketplace-l5gn2\" (UID: \"9bb8eb0c-813f-441b-807f-98d0e8be101b\") " pod="openshift-marketplace/redhat-marketplace-l5gn2"
Jan 27 20:18:24 crc kubenswrapper[4793]: I0127 20:18:24.335375 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bb8eb0c-813f-441b-807f-98d0e8be101b-catalog-content\") pod \"redhat-marketplace-l5gn2\" (UID: \"9bb8eb0c-813f-441b-807f-98d0e8be101b\") " pod="openshift-marketplace/redhat-marketplace-l5gn2"
Jan 27 20:18:24 crc kubenswrapper[4793]: I0127 20:18:24.359448 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpbwd\" (UniqueName: \"kubernetes.io/projected/9bb8eb0c-813f-441b-807f-98d0e8be101b-kube-api-access-fpbwd\") pod \"redhat-marketplace-l5gn2\" (UID: \"9bb8eb0c-813f-441b-807f-98d0e8be101b\") " pod="openshift-marketplace/redhat-marketplace-l5gn2"
Jan 27 20:18:24 crc kubenswrapper[4793]: I0127 20:18:24.467454 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l5gn2"
Jan 27 20:18:24 crc kubenswrapper[4793]: I0127 20:18:24.984862 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l5gn2"]
Jan 27 20:18:25 crc kubenswrapper[4793]: I0127 20:18:25.162184 4793 generic.go:334] "Generic (PLEG): container finished" podID="9bb8eb0c-813f-441b-807f-98d0e8be101b" containerID="0148a54ad30ec665ffa4620e1dbc7c8d78dc5ecca9897da757c57e8abdbd75c4" exitCode=0
Jan 27 20:18:25 crc kubenswrapper[4793]: I0127 20:18:25.162262 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l5gn2" event={"ID":"9bb8eb0c-813f-441b-807f-98d0e8be101b","Type":"ContainerDied","Data":"0148a54ad30ec665ffa4620e1dbc7c8d78dc5ecca9897da757c57e8abdbd75c4"}
Jan 27 20:18:25 crc kubenswrapper[4793]: I0127 20:18:25.162320 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l5gn2" event={"ID":"9bb8eb0c-813f-441b-807f-98d0e8be101b","Type":"ContainerStarted","Data":"64b557b26f6c5614b109e17ac36b66ddcdefc800e99e614054e6376d6b3127b3"}
Jan 27 20:18:26 crc kubenswrapper[4793]: I0127 20:18:26.014772 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-tqg4n"
Jan 27 20:18:26 crc kubenswrapper[4793]: I0127 20:18:26.169763 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l5gn2" event={"ID":"9bb8eb0c-813f-441b-807f-98d0e8be101b","Type":"ContainerStarted","Data":"a27c2f42d4d3a23d28c25b6895d40691404fc35358ffc1c74d912cb823d368e4"}
Jan 27 20:18:27 crc kubenswrapper[4793]: I0127 20:18:27.892093 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mnmpp"
Jan 27 20:18:27 crc kubenswrapper[4793]: I0127 20:18:27.894056 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mnmpp"
pod="openshift-marketplace/community-operators-mnmpp" Jan 27 20:18:28 crc kubenswrapper[4793]: I0127 20:18:28.181665 4793 generic.go:334] "Generic (PLEG): container finished" podID="9bb8eb0c-813f-441b-807f-98d0e8be101b" containerID="a27c2f42d4d3a23d28c25b6895d40691404fc35358ffc1c74d912cb823d368e4" exitCode=0 Jan 27 20:18:28 crc kubenswrapper[4793]: I0127 20:18:28.181916 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l5gn2" event={"ID":"9bb8eb0c-813f-441b-807f-98d0e8be101b","Type":"ContainerDied","Data":"a27c2f42d4d3a23d28c25b6895d40691404fc35358ffc1c74d912cb823d368e4"} Jan 27 20:18:28 crc kubenswrapper[4793]: I0127 20:18:28.232936 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mnmpp" Jan 27 20:18:29 crc kubenswrapper[4793]: I0127 20:18:29.189941 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l5gn2" event={"ID":"9bb8eb0c-813f-441b-807f-98d0e8be101b","Type":"ContainerStarted","Data":"3dbd4fd7dc378f8f4ecc342d4e78a15cc55c923621a675fc97b5e52198ed5392"} Jan 27 20:18:29 crc kubenswrapper[4793]: I0127 20:18:29.249459 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-l5gn2" podStartSLOduration=1.793492118 podStartE2EDuration="5.249439711s" podCreationTimestamp="2026-01-27 20:18:24 +0000 UTC" firstStartedPulling="2026-01-27 20:18:25.163630257 +0000 UTC m=+930.553883403" lastFinishedPulling="2026-01-27 20:18:28.61957783 +0000 UTC m=+934.009830996" observedRunningTime="2026-01-27 20:18:29.247440641 +0000 UTC m=+934.637693797" watchObservedRunningTime="2026-01-27 20:18:29.249439711 +0000 UTC m=+934.639692867" Jan 27 20:18:29 crc kubenswrapper[4793]: I0127 20:18:29.935876 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mnmpp"] Jan 27 20:18:30 crc kubenswrapper[4793]: I0127 20:18:30.196452 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mnmpp" podUID="6bc6931d-7a9c-4a96-961c-9d44981973f6" containerName="registry-server" containerID="cri-o://d40aa463e9f84432d3ce4b66803e9f828063d82f16715a82722531324823b443" gracePeriod=2 Jan 27 20:18:30 crc kubenswrapper[4793]: I0127 20:18:30.563180 4793 util.go:48] "No ready sandbox for pod can be found. 
Jan 27 20:18:30 crc kubenswrapper[4793]: I0127 20:18:30.563180 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mnmpp"
Jan 27 20:18:30 crc kubenswrapper[4793]: I0127 20:18:30.760995 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bc6931d-7a9c-4a96-961c-9d44981973f6-utilities\") pod \"6bc6931d-7a9c-4a96-961c-9d44981973f6\" (UID: \"6bc6931d-7a9c-4a96-961c-9d44981973f6\") "
Jan 27 20:18:30 crc kubenswrapper[4793]: I0127 20:18:30.761040 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vh5fb\" (UniqueName: \"kubernetes.io/projected/6bc6931d-7a9c-4a96-961c-9d44981973f6-kube-api-access-vh5fb\") pod \"6bc6931d-7a9c-4a96-961c-9d44981973f6\" (UID: \"6bc6931d-7a9c-4a96-961c-9d44981973f6\") "
Jan 27 20:18:30 crc kubenswrapper[4793]: I0127 20:18:30.761165 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bc6931d-7a9c-4a96-961c-9d44981973f6-catalog-content\") pod \"6bc6931d-7a9c-4a96-961c-9d44981973f6\" (UID: \"6bc6931d-7a9c-4a96-961c-9d44981973f6\") "
Jan 27 20:18:30 crc kubenswrapper[4793]: I0127 20:18:30.761952 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bc6931d-7a9c-4a96-961c-9d44981973f6-utilities" (OuterVolumeSpecName: "utilities") pod "6bc6931d-7a9c-4a96-961c-9d44981973f6" (UID: "6bc6931d-7a9c-4a96-961c-9d44981973f6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:18:30 crc kubenswrapper[4793]: I0127 20:18:30.773796 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bc6931d-7a9c-4a96-961c-9d44981973f6-kube-api-access-vh5fb" (OuterVolumeSpecName: "kube-api-access-vh5fb") pod "6bc6931d-7a9c-4a96-961c-9d44981973f6" (UID: "6bc6931d-7a9c-4a96-961c-9d44981973f6"). InnerVolumeSpecName "kube-api-access-vh5fb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:18:30 crc kubenswrapper[4793]: I0127 20:18:30.821203 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bc6931d-7a9c-4a96-961c-9d44981973f6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6bc6931d-7a9c-4a96-961c-9d44981973f6" (UID: "6bc6931d-7a9c-4a96-961c-9d44981973f6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:18:30 crc kubenswrapper[4793]: I0127 20:18:30.863123 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bc6931d-7a9c-4a96-961c-9d44981973f6-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 20:18:30 crc kubenswrapper[4793]: I0127 20:18:30.863153 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bc6931d-7a9c-4a96-961c-9d44981973f6-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 20:18:30 crc kubenswrapper[4793]: I0127 20:18:30.863163 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vh5fb\" (UniqueName: \"kubernetes.io/projected/6bc6931d-7a9c-4a96-961c-9d44981973f6-kube-api-access-vh5fb\") on node \"crc\" DevicePath \"\""
Jan 27 20:18:31 crc kubenswrapper[4793]: I0127 20:18:31.203439 4793 generic.go:334] "Generic (PLEG): container finished" podID="6bc6931d-7a9c-4a96-961c-9d44981973f6" containerID="d40aa463e9f84432d3ce4b66803e9f828063d82f16715a82722531324823b443" exitCode=0
Jan 27 20:18:31 crc kubenswrapper[4793]: I0127 20:18:31.203482 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mnmpp" event={"ID":"6bc6931d-7a9c-4a96-961c-9d44981973f6","Type":"ContainerDied","Data":"d40aa463e9f84432d3ce4b66803e9f828063d82f16715a82722531324823b443"}
Jan 27 20:18:31 crc kubenswrapper[4793]: I0127 20:18:31.203509 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mnmpp" event={"ID":"6bc6931d-7a9c-4a96-961c-9d44981973f6","Type":"ContainerDied","Data":"8efff6f8b80a718a0f2457018d0b3fbf631588f855a0cb4db8e24ab0d650fc15"}
Jan 27 20:18:31 crc kubenswrapper[4793]: I0127 20:18:31.203529 4793 scope.go:117] "RemoveContainer" containerID="d40aa463e9f84432d3ce4b66803e9f828063d82f16715a82722531324823b443"
Need to start a new one" pod="openshift-marketplace/community-operators-mnmpp" Jan 27 20:18:31 crc kubenswrapper[4793]: I0127 20:18:31.224210 4793 scope.go:117] "RemoveContainer" containerID="2e5be832fb8d799555e1011d6961361218a75c98c2f74a1860b28e67f83c68dc" Jan 27 20:18:31 crc kubenswrapper[4793]: I0127 20:18:31.234517 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mnmpp"] Jan 27 20:18:31 crc kubenswrapper[4793]: I0127 20:18:31.239843 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mnmpp"] Jan 27 20:18:31 crc kubenswrapper[4793]: I0127 20:18:31.264569 4793 scope.go:117] "RemoveContainer" containerID="513a6a84bd4489984af298f7b77f608b1e2c58f21369123119ee4655700573a0" Jan 27 20:18:31 crc kubenswrapper[4793]: I0127 20:18:31.281926 4793 scope.go:117] "RemoveContainer" containerID="d40aa463e9f84432d3ce4b66803e9f828063d82f16715a82722531324823b443" Jan 27 20:18:31 crc kubenswrapper[4793]: E0127 20:18:31.282614 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d40aa463e9f84432d3ce4b66803e9f828063d82f16715a82722531324823b443\": container with ID starting with d40aa463e9f84432d3ce4b66803e9f828063d82f16715a82722531324823b443 not found: ID does not exist" containerID="d40aa463e9f84432d3ce4b66803e9f828063d82f16715a82722531324823b443" Jan 27 20:18:31 crc kubenswrapper[4793]: I0127 20:18:31.282916 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d40aa463e9f84432d3ce4b66803e9f828063d82f16715a82722531324823b443"} err="failed to get container status \"d40aa463e9f84432d3ce4b66803e9f828063d82f16715a82722531324823b443\": rpc error: code = NotFound desc = could not find container \"d40aa463e9f84432d3ce4b66803e9f828063d82f16715a82722531324823b443\": container with ID starting with d40aa463e9f84432d3ce4b66803e9f828063d82f16715a82722531324823b443 not found: ID does not exist" Jan 27 20:18:31 crc kubenswrapper[4793]: I0127 20:18:31.283022 4793 scope.go:117] "RemoveContainer" containerID="2e5be832fb8d799555e1011d6961361218a75c98c2f74a1860b28e67f83c68dc" Jan 27 20:18:31 crc kubenswrapper[4793]: E0127 20:18:31.283710 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e5be832fb8d799555e1011d6961361218a75c98c2f74a1860b28e67f83c68dc\": container with ID starting with 2e5be832fb8d799555e1011d6961361218a75c98c2f74a1860b28e67f83c68dc not found: ID does not exist" containerID="2e5be832fb8d799555e1011d6961361218a75c98c2f74a1860b28e67f83c68dc" Jan 27 20:18:31 crc kubenswrapper[4793]: I0127 20:18:31.283784 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e5be832fb8d799555e1011d6961361218a75c98c2f74a1860b28e67f83c68dc"} err="failed to get container status \"2e5be832fb8d799555e1011d6961361218a75c98c2f74a1860b28e67f83c68dc\": rpc error: code = NotFound desc = could not find container \"2e5be832fb8d799555e1011d6961361218a75c98c2f74a1860b28e67f83c68dc\": container with ID starting with 2e5be832fb8d799555e1011d6961361218a75c98c2f74a1860b28e67f83c68dc not found: ID does not exist" Jan 27 20:18:31 crc kubenswrapper[4793]: I0127 20:18:31.283819 4793 scope.go:117] "RemoveContainer" containerID="513a6a84bd4489984af298f7b77f608b1e2c58f21369123119ee4655700573a0" Jan 27 20:18:31 crc kubenswrapper[4793]: E0127 20:18:31.284114 4793 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"513a6a84bd4489984af298f7b77f608b1e2c58f21369123119ee4655700573a0\": container with ID starting with 513a6a84bd4489984af298f7b77f608b1e2c58f21369123119ee4655700573a0 not found: ID does not exist" containerID="513a6a84bd4489984af298f7b77f608b1e2c58f21369123119ee4655700573a0" Jan 27 20:18:31 crc kubenswrapper[4793]: I0127 20:18:31.284208 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"513a6a84bd4489984af298f7b77f608b1e2c58f21369123119ee4655700573a0"} err="failed to get container status \"513a6a84bd4489984af298f7b77f608b1e2c58f21369123119ee4655700573a0\": rpc error: code = NotFound desc = could not find container \"513a6a84bd4489984af298f7b77f608b1e2c58f21369123119ee4655700573a0\": container with ID starting with 513a6a84bd4489984af298f7b77f608b1e2c58f21369123119ee4655700573a0 not found: ID does not exist" Jan 27 20:18:31 crc kubenswrapper[4793]: I0127 20:18:31.813014 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bc6931d-7a9c-4a96-961c-9d44981973f6" path="/var/lib/kubelet/pods/6bc6931d-7a9c-4a96-961c-9d44981973f6/volumes" Jan 27 20:18:34 crc kubenswrapper[4793]: I0127 20:18:34.471719 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-l5gn2" Jan 27 20:18:34 crc kubenswrapper[4793]: I0127 20:18:34.472368 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-l5gn2" Jan 27 20:18:34 crc kubenswrapper[4793]: I0127 20:18:34.565986 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-l5gn2" Jan 27 20:18:35 crc kubenswrapper[4793]: I0127 20:18:35.280304 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-l5gn2" Jan 27 20:18:35 crc kubenswrapper[4793]: I0127 20:18:35.335976 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l5gn2"] Jan 27 20:18:37 crc kubenswrapper[4793]: I0127 20:18:37.252947 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-l5gn2" podUID="9bb8eb0c-813f-441b-807f-98d0e8be101b" containerName="registry-server" containerID="cri-o://3dbd4fd7dc378f8f4ecc342d4e78a15cc55c923621a675fc97b5e52198ed5392" gracePeriod=2 Jan 27 20:18:37 crc kubenswrapper[4793]: I0127 20:18:37.606579 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l5gn2" Jan 27 20:18:37 crc kubenswrapper[4793]: I0127 20:18:37.760490 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bb8eb0c-813f-441b-807f-98d0e8be101b-utilities\") pod \"9bb8eb0c-813f-441b-807f-98d0e8be101b\" (UID: \"9bb8eb0c-813f-441b-807f-98d0e8be101b\") " Jan 27 20:18:37 crc kubenswrapper[4793]: I0127 20:18:37.760927 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fpbwd\" (UniqueName: \"kubernetes.io/projected/9bb8eb0c-813f-441b-807f-98d0e8be101b-kube-api-access-fpbwd\") pod \"9bb8eb0c-813f-441b-807f-98d0e8be101b\" (UID: \"9bb8eb0c-813f-441b-807f-98d0e8be101b\") " Jan 27 20:18:37 crc kubenswrapper[4793]: I0127 20:18:37.760960 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bb8eb0c-813f-441b-807f-98d0e8be101b-catalog-content\") pod \"9bb8eb0c-813f-441b-807f-98d0e8be101b\" (UID: \"9bb8eb0c-813f-441b-807f-98d0e8be101b\") " Jan 27 20:18:37 crc kubenswrapper[4793]: I0127 20:18:37.761792 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9bb8eb0c-813f-441b-807f-98d0e8be101b-utilities" (OuterVolumeSpecName: "utilities") pod "9bb8eb0c-813f-441b-807f-98d0e8be101b" (UID: "9bb8eb0c-813f-441b-807f-98d0e8be101b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:18:37 crc kubenswrapper[4793]: I0127 20:18:37.768194 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9bb8eb0c-813f-441b-807f-98d0e8be101b-kube-api-access-fpbwd" (OuterVolumeSpecName: "kube-api-access-fpbwd") pod "9bb8eb0c-813f-441b-807f-98d0e8be101b" (UID: "9bb8eb0c-813f-441b-807f-98d0e8be101b"). InnerVolumeSpecName "kube-api-access-fpbwd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:18:37 crc kubenswrapper[4793]: I0127 20:18:37.784514 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9bb8eb0c-813f-441b-807f-98d0e8be101b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9bb8eb0c-813f-441b-807f-98d0e8be101b" (UID: "9bb8eb0c-813f-441b-807f-98d0e8be101b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:18:37 crc kubenswrapper[4793]: I0127 20:18:37.862179 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bb8eb0c-813f-441b-807f-98d0e8be101b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:18:37 crc kubenswrapper[4793]: I0127 20:18:37.862217 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bb8eb0c-813f-441b-807f-98d0e8be101b-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:18:37 crc kubenswrapper[4793]: I0127 20:18:37.862228 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpbwd\" (UniqueName: \"kubernetes.io/projected/9bb8eb0c-813f-441b-807f-98d0e8be101b-kube-api-access-fpbwd\") on node \"crc\" DevicePath \"\"" Jan 27 20:18:38 crc kubenswrapper[4793]: I0127 20:18:38.265110 4793 generic.go:334] "Generic (PLEG): container finished" podID="9bb8eb0c-813f-441b-807f-98d0e8be101b" containerID="3dbd4fd7dc378f8f4ecc342d4e78a15cc55c923621a675fc97b5e52198ed5392" exitCode=0 Jan 27 20:18:38 crc kubenswrapper[4793]: I0127 20:18:38.265177 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l5gn2" Jan 27 20:18:38 crc kubenswrapper[4793]: I0127 20:18:38.265212 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l5gn2" event={"ID":"9bb8eb0c-813f-441b-807f-98d0e8be101b","Type":"ContainerDied","Data":"3dbd4fd7dc378f8f4ecc342d4e78a15cc55c923621a675fc97b5e52198ed5392"} Jan 27 20:18:38 crc kubenswrapper[4793]: I0127 20:18:38.265998 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l5gn2" event={"ID":"9bb8eb0c-813f-441b-807f-98d0e8be101b","Type":"ContainerDied","Data":"64b557b26f6c5614b109e17ac36b66ddcdefc800e99e614054e6376d6b3127b3"} Jan 27 20:18:38 crc kubenswrapper[4793]: I0127 20:18:38.266017 4793 scope.go:117] "RemoveContainer" containerID="3dbd4fd7dc378f8f4ecc342d4e78a15cc55c923621a675fc97b5e52198ed5392" Jan 27 20:18:38 crc kubenswrapper[4793]: I0127 20:18:38.288241 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l5gn2"] Jan 27 20:18:38 crc kubenswrapper[4793]: I0127 20:18:38.293049 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-l5gn2"] Jan 27 20:18:38 crc kubenswrapper[4793]: I0127 20:18:38.295969 4793 scope.go:117] "RemoveContainer" containerID="a27c2f42d4d3a23d28c25b6895d40691404fc35358ffc1c74d912cb823d368e4" Jan 27 20:18:38 crc kubenswrapper[4793]: I0127 20:18:38.314086 4793 scope.go:117] "RemoveContainer" containerID="0148a54ad30ec665ffa4620e1dbc7c8d78dc5ecca9897da757c57e8abdbd75c4" Jan 27 20:18:38 crc kubenswrapper[4793]: I0127 20:18:38.346688 4793 scope.go:117] "RemoveContainer" containerID="3dbd4fd7dc378f8f4ecc342d4e78a15cc55c923621a675fc97b5e52198ed5392" Jan 27 20:18:38 crc kubenswrapper[4793]: E0127 20:18:38.347367 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3dbd4fd7dc378f8f4ecc342d4e78a15cc55c923621a675fc97b5e52198ed5392\": container with ID starting with 3dbd4fd7dc378f8f4ecc342d4e78a15cc55c923621a675fc97b5e52198ed5392 not found: ID does not exist" containerID="3dbd4fd7dc378f8f4ecc342d4e78a15cc55c923621a675fc97b5e52198ed5392" Jan 27 20:18:38 crc kubenswrapper[4793]: I0127 20:18:38.347471 4793 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3dbd4fd7dc378f8f4ecc342d4e78a15cc55c923621a675fc97b5e52198ed5392"} err="failed to get container status \"3dbd4fd7dc378f8f4ecc342d4e78a15cc55c923621a675fc97b5e52198ed5392\": rpc error: code = NotFound desc = could not find container \"3dbd4fd7dc378f8f4ecc342d4e78a15cc55c923621a675fc97b5e52198ed5392\": container with ID starting with 3dbd4fd7dc378f8f4ecc342d4e78a15cc55c923621a675fc97b5e52198ed5392 not found: ID does not exist" Jan 27 20:18:38 crc kubenswrapper[4793]: I0127 20:18:38.347587 4793 scope.go:117] "RemoveContainer" containerID="a27c2f42d4d3a23d28c25b6895d40691404fc35358ffc1c74d912cb823d368e4" Jan 27 20:18:38 crc kubenswrapper[4793]: E0127 20:18:38.347908 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a27c2f42d4d3a23d28c25b6895d40691404fc35358ffc1c74d912cb823d368e4\": container with ID starting with a27c2f42d4d3a23d28c25b6895d40691404fc35358ffc1c74d912cb823d368e4 not found: ID does not exist" containerID="a27c2f42d4d3a23d28c25b6895d40691404fc35358ffc1c74d912cb823d368e4" Jan 27 20:18:38 crc kubenswrapper[4793]: I0127 20:18:38.347981 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a27c2f42d4d3a23d28c25b6895d40691404fc35358ffc1c74d912cb823d368e4"} err="failed to get container status \"a27c2f42d4d3a23d28c25b6895d40691404fc35358ffc1c74d912cb823d368e4\": rpc error: code = NotFound desc = could not find container \"a27c2f42d4d3a23d28c25b6895d40691404fc35358ffc1c74d912cb823d368e4\": container with ID starting with a27c2f42d4d3a23d28c25b6895d40691404fc35358ffc1c74d912cb823d368e4 not found: ID does not exist" Jan 27 20:18:38 crc kubenswrapper[4793]: I0127 20:18:38.348047 4793 scope.go:117] "RemoveContainer" containerID="0148a54ad30ec665ffa4620e1dbc7c8d78dc5ecca9897da757c57e8abdbd75c4" Jan 27 20:18:38 crc kubenswrapper[4793]: E0127 20:18:38.348773 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0148a54ad30ec665ffa4620e1dbc7c8d78dc5ecca9897da757c57e8abdbd75c4\": container with ID starting with 0148a54ad30ec665ffa4620e1dbc7c8d78dc5ecca9897da757c57e8abdbd75c4 not found: ID does not exist" containerID="0148a54ad30ec665ffa4620e1dbc7c8d78dc5ecca9897da757c57e8abdbd75c4" Jan 27 20:18:38 crc kubenswrapper[4793]: I0127 20:18:38.348831 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0148a54ad30ec665ffa4620e1dbc7c8d78dc5ecca9897da757c57e8abdbd75c4"} err="failed to get container status \"0148a54ad30ec665ffa4620e1dbc7c8d78dc5ecca9897da757c57e8abdbd75c4\": rpc error: code = NotFound desc = could not find container \"0148a54ad30ec665ffa4620e1dbc7c8d78dc5ecca9897da757c57e8abdbd75c4\": container with ID starting with 0148a54ad30ec665ffa4620e1dbc7c8d78dc5ecca9897da757c57e8abdbd75c4 not found: ID does not exist" Jan 27 20:18:39 crc kubenswrapper[4793]: I0127 20:18:39.811795 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9bb8eb0c-813f-441b-807f-98d0e8be101b" path="/var/lib/kubelet/pods/9bb8eb0c-813f-441b-807f-98d0e8be101b/volumes" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.168028 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-slbcq" podUID="f8ceccd4-2d2a-45c9-a255-2d6763b7d150" containerName="console" 
containerID="cri-o://f6442722fc422334bd9d2a0dcd14715cb9b6ade79f92ad1c488cebc3ec9d0cb8" gracePeriod=15 Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.507433 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-slbcq_f8ceccd4-2d2a-45c9-a255-2d6763b7d150/console/0.log" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.507744 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.598112 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft"] Jan 27 20:18:41 crc kubenswrapper[4793]: E0127 20:18:41.598374 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bc6931d-7a9c-4a96-961c-9d44981973f6" containerName="registry-server" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.598390 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bc6931d-7a9c-4a96-961c-9d44981973f6" containerName="registry-server" Jan 27 20:18:41 crc kubenswrapper[4793]: E0127 20:18:41.598404 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bb8eb0c-813f-441b-807f-98d0e8be101b" containerName="extract-content" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.598411 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bb8eb0c-813f-441b-807f-98d0e8be101b" containerName="extract-content" Jan 27 20:18:41 crc kubenswrapper[4793]: E0127 20:18:41.598422 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bc6931d-7a9c-4a96-961c-9d44981973f6" containerName="extract-utilities" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.598429 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bc6931d-7a9c-4a96-961c-9d44981973f6" containerName="extract-utilities" Jan 27 20:18:41 crc kubenswrapper[4793]: E0127 20:18:41.598447 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bb8eb0c-813f-441b-807f-98d0e8be101b" containerName="extract-utilities" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.598455 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bb8eb0c-813f-441b-807f-98d0e8be101b" containerName="extract-utilities" Jan 27 20:18:41 crc kubenswrapper[4793]: E0127 20:18:41.598467 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bb8eb0c-813f-441b-807f-98d0e8be101b" containerName="registry-server" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.598474 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bb8eb0c-813f-441b-807f-98d0e8be101b" containerName="registry-server" Jan 27 20:18:41 crc kubenswrapper[4793]: E0127 20:18:41.598487 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8ceccd4-2d2a-45c9-a255-2d6763b7d150" containerName="console" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.598494 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8ceccd4-2d2a-45c9-a255-2d6763b7d150" containerName="console" Jan 27 20:18:41 crc kubenswrapper[4793]: E0127 20:18:41.598506 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bc6931d-7a9c-4a96-961c-9d44981973f6" containerName="extract-content" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.598514 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bc6931d-7a9c-4a96-961c-9d44981973f6" containerName="extract-content" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 
20:18:41.598646 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8ceccd4-2d2a-45c9-a255-2d6763b7d150" containerName="console" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.598663 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bc6931d-7a9c-4a96-961c-9d44981973f6" containerName="registry-server" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.598670 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="9bb8eb0c-813f-441b-807f-98d0e8be101b" containerName="registry-server" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.599996 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.602501 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.608390 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft"] Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.629181 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-serving-cert\") pod \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.629244 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4pkzz\" (UniqueName: \"kubernetes.io/projected/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-kube-api-access-4pkzz\") pod \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.629290 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-oauth-config\") pod \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.629355 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-config\") pod \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.629386 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-service-ca\") pod \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.629406 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-trusted-ca-bundle\") pod \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.629444 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-oauth-serving-cert\") pod \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\" (UID: \"f8ceccd4-2d2a-45c9-a255-2d6763b7d150\") " Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.629720 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft\" (UID: \"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.629800 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft\" (UID: \"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.629878 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxzdw\" (UniqueName: \"kubernetes.io/projected/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-kube-api-access-jxzdw\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft\" (UID: \"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.630640 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-service-ca" (OuterVolumeSpecName: "service-ca") pod "f8ceccd4-2d2a-45c9-a255-2d6763b7d150" (UID: "f8ceccd4-2d2a-45c9-a255-2d6763b7d150"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.630501 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "f8ceccd4-2d2a-45c9-a255-2d6763b7d150" (UID: "f8ceccd4-2d2a-45c9-a255-2d6763b7d150"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.630795 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-config" (OuterVolumeSpecName: "console-config") pod "f8ceccd4-2d2a-45c9-a255-2d6763b7d150" (UID: "f8ceccd4-2d2a-45c9-a255-2d6763b7d150"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.631099 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "f8ceccd4-2d2a-45c9-a255-2d6763b7d150" (UID: "f8ceccd4-2d2a-45c9-a255-2d6763b7d150"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.636419 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "f8ceccd4-2d2a-45c9-a255-2d6763b7d150" (UID: "f8ceccd4-2d2a-45c9-a255-2d6763b7d150"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.637060 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-kube-api-access-4pkzz" (OuterVolumeSpecName: "kube-api-access-4pkzz") pod "f8ceccd4-2d2a-45c9-a255-2d6763b7d150" (UID: "f8ceccd4-2d2a-45c9-a255-2d6763b7d150"). InnerVolumeSpecName "kube-api-access-4pkzz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.637066 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "f8ceccd4-2d2a-45c9-a255-2d6763b7d150" (UID: "f8ceccd4-2d2a-45c9-a255-2d6763b7d150"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.774052 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxzdw\" (UniqueName: \"kubernetes.io/projected/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-kube-api-access-jxzdw\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft\" (UID: \"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.774112 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft\" (UID: \"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.774193 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft\" (UID: \"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.774233 4793 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.774244 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4pkzz\" (UniqueName: \"kubernetes.io/projected/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-kube-api-access-4pkzz\") on node \"crc\" DevicePath \"\"" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.774254 4793 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.774263 4793 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-console-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.774272 4793 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-service-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.774280 4793 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.774290 4793 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f8ceccd4-2d2a-45c9-a255-2d6763b7d150-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.774717 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft\" (UID: \"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.774914 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft\" (UID: \"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.791540 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxzdw\" (UniqueName: \"kubernetes.io/projected/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-kube-api-access-jxzdw\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft\" (UID: \"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" Jan 27 20:18:41 crc kubenswrapper[4793]: I0127 20:18:41.920780 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" Jan 27 20:18:42 crc kubenswrapper[4793]: I0127 20:18:42.291777 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-slbcq_f8ceccd4-2d2a-45c9-a255-2d6763b7d150/console/0.log" Jan 27 20:18:42 crc kubenswrapper[4793]: I0127 20:18:42.292124 4793 generic.go:334] "Generic (PLEG): container finished" podID="f8ceccd4-2d2a-45c9-a255-2d6763b7d150" containerID="f6442722fc422334bd9d2a0dcd14715cb9b6ade79f92ad1c488cebc3ec9d0cb8" exitCode=2 Jan 27 20:18:42 crc kubenswrapper[4793]: I0127 20:18:42.292158 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-slbcq" event={"ID":"f8ceccd4-2d2a-45c9-a255-2d6763b7d150","Type":"ContainerDied","Data":"f6442722fc422334bd9d2a0dcd14715cb9b6ade79f92ad1c488cebc3ec9d0cb8"} Jan 27 20:18:42 crc kubenswrapper[4793]: I0127 20:18:42.292182 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-slbcq" event={"ID":"f8ceccd4-2d2a-45c9-a255-2d6763b7d150","Type":"ContainerDied","Data":"54d0992a59b891c18b927532464cecf178526ab9fcb3498dbc4e90008c8c1d64"} Jan 27 20:18:42 crc kubenswrapper[4793]: I0127 20:18:42.292198 4793 scope.go:117] "RemoveContainer" containerID="f6442722fc422334bd9d2a0dcd14715cb9b6ade79f92ad1c488cebc3ec9d0cb8" Jan 27 20:18:42 crc kubenswrapper[4793]: I0127 20:18:42.292327 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-slbcq" Jan 27 20:18:42 crc kubenswrapper[4793]: I0127 20:18:42.327973 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-slbcq"] Jan 27 20:18:42 crc kubenswrapper[4793]: I0127 20:18:42.331911 4793 scope.go:117] "RemoveContainer" containerID="f6442722fc422334bd9d2a0dcd14715cb9b6ade79f92ad1c488cebc3ec9d0cb8" Jan 27 20:18:42 crc kubenswrapper[4793]: I0127 20:18:42.332355 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-slbcq"] Jan 27 20:18:42 crc kubenswrapper[4793]: E0127 20:18:42.332386 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6442722fc422334bd9d2a0dcd14715cb9b6ade79f92ad1c488cebc3ec9d0cb8\": container with ID starting with f6442722fc422334bd9d2a0dcd14715cb9b6ade79f92ad1c488cebc3ec9d0cb8 not found: ID does not exist" containerID="f6442722fc422334bd9d2a0dcd14715cb9b6ade79f92ad1c488cebc3ec9d0cb8" Jan 27 20:18:42 crc kubenswrapper[4793]: I0127 20:18:42.332426 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6442722fc422334bd9d2a0dcd14715cb9b6ade79f92ad1c488cebc3ec9d0cb8"} err="failed to get container status \"f6442722fc422334bd9d2a0dcd14715cb9b6ade79f92ad1c488cebc3ec9d0cb8\": rpc error: code = NotFound desc = could not find container \"f6442722fc422334bd9d2a0dcd14715cb9b6ade79f92ad1c488cebc3ec9d0cb8\": container with ID starting with f6442722fc422334bd9d2a0dcd14715cb9b6ade79f92ad1c488cebc3ec9d0cb8 not found: ID does not exist" Jan 27 20:18:42 crc kubenswrapper[4793]: I0127 20:18:42.339510 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft"] Jan 27 20:18:43 crc kubenswrapper[4793]: I0127 20:18:43.299679 4793 generic.go:334] "Generic (PLEG): container finished" podID="2545c17b-cfc8-46bc-ad6c-9d7b2e74a216" 
containerID="38ec3fd8201b04d5fd5bdd5217e90ce881e85702139e6f719c8f8db45d9f20e3" exitCode=0 Jan 27 20:18:43 crc kubenswrapper[4793]: I0127 20:18:43.299736 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" event={"ID":"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216","Type":"ContainerDied","Data":"38ec3fd8201b04d5fd5bdd5217e90ce881e85702139e6f719c8f8db45d9f20e3"} Jan 27 20:18:43 crc kubenswrapper[4793]: I0127 20:18:43.299946 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" event={"ID":"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216","Type":"ContainerStarted","Data":"e692674fae8c4a2640b0038e11a7e7ac5d39e253a559fe1144feadd42b7da6d4"} Jan 27 20:18:43 crc kubenswrapper[4793]: I0127 20:18:43.812538 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8ceccd4-2d2a-45c9-a255-2d6763b7d150" path="/var/lib/kubelet/pods/f8ceccd4-2d2a-45c9-a255-2d6763b7d150/volumes" Jan 27 20:18:45 crc kubenswrapper[4793]: I0127 20:18:45.431882 4793 generic.go:334] "Generic (PLEG): container finished" podID="2545c17b-cfc8-46bc-ad6c-9d7b2e74a216" containerID="5a4710919b94858000184e1a801877b17bd94086e3949a4dc3f2911b0091de95" exitCode=0 Jan 27 20:18:45 crc kubenswrapper[4793]: I0127 20:18:45.432163 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" event={"ID":"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216","Type":"ContainerDied","Data":"5a4710919b94858000184e1a801877b17bd94086e3949a4dc3f2911b0091de95"} Jan 27 20:18:46 crc kubenswrapper[4793]: I0127 20:18:46.441314 4793 generic.go:334] "Generic (PLEG): container finished" podID="2545c17b-cfc8-46bc-ad6c-9d7b2e74a216" containerID="40c3f6f0975b45bde375076fb8501ccaf337b7bc105aef206fbe3da0bd103f78" exitCode=0 Jan 27 20:18:46 crc kubenswrapper[4793]: I0127 20:18:46.441418 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" event={"ID":"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216","Type":"ContainerDied","Data":"40c3f6f0975b45bde375076fb8501ccaf337b7bc105aef206fbe3da0bd103f78"} Jan 27 20:18:47 crc kubenswrapper[4793]: I0127 20:18:47.700613 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" Jan 27 20:18:47 crc kubenswrapper[4793]: I0127 20:18:47.821209 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-bundle\") pod \"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216\" (UID: \"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216\") " Jan 27 20:18:47 crc kubenswrapper[4793]: I0127 20:18:47.821279 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jxzdw\" (UniqueName: \"kubernetes.io/projected/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-kube-api-access-jxzdw\") pod \"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216\" (UID: \"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216\") " Jan 27 20:18:47 crc kubenswrapper[4793]: I0127 20:18:47.821312 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-util\") pod \"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216\" (UID: \"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216\") " Jan 27 20:18:47 crc kubenswrapper[4793]: I0127 20:18:47.823077 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-bundle" (OuterVolumeSpecName: "bundle") pod "2545c17b-cfc8-46bc-ad6c-9d7b2e74a216" (UID: "2545c17b-cfc8-46bc-ad6c-9d7b2e74a216"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:18:47 crc kubenswrapper[4793]: I0127 20:18:47.827419 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-kube-api-access-jxzdw" (OuterVolumeSpecName: "kube-api-access-jxzdw") pod "2545c17b-cfc8-46bc-ad6c-9d7b2e74a216" (UID: "2545c17b-cfc8-46bc-ad6c-9d7b2e74a216"). InnerVolumeSpecName "kube-api-access-jxzdw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:18:47 crc kubenswrapper[4793]: I0127 20:18:47.844017 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-util" (OuterVolumeSpecName: "util") pod "2545c17b-cfc8-46bc-ad6c-9d7b2e74a216" (UID: "2545c17b-cfc8-46bc-ad6c-9d7b2e74a216"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:18:47 crc kubenswrapper[4793]: I0127 20:18:47.922667 4793 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:18:47 crc kubenswrapper[4793]: I0127 20:18:47.922709 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jxzdw\" (UniqueName: \"kubernetes.io/projected/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-kube-api-access-jxzdw\") on node \"crc\" DevicePath \"\"" Jan 27 20:18:47 crc kubenswrapper[4793]: I0127 20:18:47.922724 4793 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2545c17b-cfc8-46bc-ad6c-9d7b2e74a216-util\") on node \"crc\" DevicePath \"\"" Jan 27 20:18:48 crc kubenswrapper[4793]: I0127 20:18:48.457613 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" event={"ID":"2545c17b-cfc8-46bc-ad6c-9d7b2e74a216","Type":"ContainerDied","Data":"e692674fae8c4a2640b0038e11a7e7ac5d39e253a559fe1144feadd42b7da6d4"} Jan 27 20:18:48 crc kubenswrapper[4793]: I0127 20:18:48.457659 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e692674fae8c4a2640b0038e11a7e7ac5d39e253a559fe1144feadd42b7da6d4" Jan 27 20:18:48 crc kubenswrapper[4793]: I0127 20:18:48.457735 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft" Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.149197 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r9jt2"] Jan 27 20:18:54 crc kubenswrapper[4793]: E0127 20:18:54.149909 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2545c17b-cfc8-46bc-ad6c-9d7b2e74a216" containerName="util" Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.149927 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2545c17b-cfc8-46bc-ad6c-9d7b2e74a216" containerName="util" Jan 27 20:18:54 crc kubenswrapper[4793]: E0127 20:18:54.149944 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2545c17b-cfc8-46bc-ad6c-9d7b2e74a216" containerName="extract" Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.149951 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2545c17b-cfc8-46bc-ad6c-9d7b2e74a216" containerName="extract" Jan 27 20:18:54 crc kubenswrapper[4793]: E0127 20:18:54.149977 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2545c17b-cfc8-46bc-ad6c-9d7b2e74a216" containerName="pull" Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.149987 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2545c17b-cfc8-46bc-ad6c-9d7b2e74a216" containerName="pull" Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.150119 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="2545c17b-cfc8-46bc-ad6c-9d7b2e74a216" containerName="extract" Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.151059 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r9jt2" Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.164950 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r9jt2"] Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.339748 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6083fd40-42bf-4490-990f-bcfea8017677-utilities\") pod \"certified-operators-r9jt2\" (UID: \"6083fd40-42bf-4490-990f-bcfea8017677\") " pod="openshift-marketplace/certified-operators-r9jt2" Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.340051 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6083fd40-42bf-4490-990f-bcfea8017677-catalog-content\") pod \"certified-operators-r9jt2\" (UID: \"6083fd40-42bf-4490-990f-bcfea8017677\") " pod="openshift-marketplace/certified-operators-r9jt2" Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.340166 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjctt\" (UniqueName: \"kubernetes.io/projected/6083fd40-42bf-4490-990f-bcfea8017677-kube-api-access-qjctt\") pod \"certified-operators-r9jt2\" (UID: \"6083fd40-42bf-4490-990f-bcfea8017677\") " pod="openshift-marketplace/certified-operators-r9jt2" Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.441391 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6083fd40-42bf-4490-990f-bcfea8017677-utilities\") pod \"certified-operators-r9jt2\" (UID: \"6083fd40-42bf-4490-990f-bcfea8017677\") " pod="openshift-marketplace/certified-operators-r9jt2" Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.441759 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6083fd40-42bf-4490-990f-bcfea8017677-catalog-content\") pod \"certified-operators-r9jt2\" (UID: \"6083fd40-42bf-4490-990f-bcfea8017677\") " pod="openshift-marketplace/certified-operators-r9jt2" Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.441901 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qjctt\" (UniqueName: \"kubernetes.io/projected/6083fd40-42bf-4490-990f-bcfea8017677-kube-api-access-qjctt\") pod \"certified-operators-r9jt2\" (UID: \"6083fd40-42bf-4490-990f-bcfea8017677\") " pod="openshift-marketplace/certified-operators-r9jt2" Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.441910 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6083fd40-42bf-4490-990f-bcfea8017677-utilities\") pod \"certified-operators-r9jt2\" (UID: \"6083fd40-42bf-4490-990f-bcfea8017677\") " pod="openshift-marketplace/certified-operators-r9jt2" Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.442272 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6083fd40-42bf-4490-990f-bcfea8017677-catalog-content\") pod \"certified-operators-r9jt2\" (UID: \"6083fd40-42bf-4490-990f-bcfea8017677\") " pod="openshift-marketplace/certified-operators-r9jt2" Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.473483 4793 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qjctt\" (UniqueName: \"kubernetes.io/projected/6083fd40-42bf-4490-990f-bcfea8017677-kube-api-access-qjctt\") pod \"certified-operators-r9jt2\" (UID: \"6083fd40-42bf-4490-990f-bcfea8017677\") " pod="openshift-marketplace/certified-operators-r9jt2" Jan 27 20:18:54 crc kubenswrapper[4793]: I0127 20:18:54.770261 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r9jt2" Jan 27 20:18:55 crc kubenswrapper[4793]: I0127 20:18:55.046977 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r9jt2"] Jan 27 20:18:55 crc kubenswrapper[4793]: I0127 20:18:55.677831 4793 generic.go:334] "Generic (PLEG): container finished" podID="6083fd40-42bf-4490-990f-bcfea8017677" containerID="609e13dae101d95ae21ea095ac922e7d6225043fc8d8f29c232918b598ac09dd" exitCode=0 Jan 27 20:18:55 crc kubenswrapper[4793]: I0127 20:18:55.677955 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9jt2" event={"ID":"6083fd40-42bf-4490-990f-bcfea8017677","Type":"ContainerDied","Data":"609e13dae101d95ae21ea095ac922e7d6225043fc8d8f29c232918b598ac09dd"} Jan 27 20:18:55 crc kubenswrapper[4793]: I0127 20:18:55.678136 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9jt2" event={"ID":"6083fd40-42bf-4490-990f-bcfea8017677","Type":"ContainerStarted","Data":"ceec88664fe2ed613ddc69a991b43bfa2a0d467207551571002e01b6729f915a"} Jan 27 20:18:57 crc kubenswrapper[4793]: I0127 20:18:57.692349 4793 generic.go:334] "Generic (PLEG): container finished" podID="6083fd40-42bf-4490-990f-bcfea8017677" containerID="b700d1ceda49ae61d505bd3d7d24aacb142e1fc544a25736a231e9016ce21026" exitCode=0 Jan 27 20:18:57 crc kubenswrapper[4793]: I0127 20:18:57.692447 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9jt2" event={"ID":"6083fd40-42bf-4490-990f-bcfea8017677","Type":"ContainerDied","Data":"b700d1ceda49ae61d505bd3d7d24aacb142e1fc544a25736a231e9016ce21026"} Jan 27 20:18:57 crc kubenswrapper[4793]: I0127 20:18:57.811688 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs"] Jan 27 20:18:57 crc kubenswrapper[4793]: I0127 20:18:57.812574 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs"
Jan 27 20:18:57 crc kubenswrapper[4793]: I0127 20:18:57.814928 4793 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert"
Jan 27 20:18:57 crc kubenswrapper[4793]: I0127 20:18:57.815108 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt"
Jan 27 20:18:57 crc kubenswrapper[4793]: I0127 20:18:57.815378 4793 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-42mnt"
Jan 27 20:18:57 crc kubenswrapper[4793]: I0127 20:18:57.817415 4793 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert"
Jan 27 20:18:57 crc kubenswrapper[4793]: I0127 20:18:57.821196 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt"
Jan 27 20:18:57 crc kubenswrapper[4793]: I0127 20:18:57.830930 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs"]
Jan 27 20:18:57 crc kubenswrapper[4793]: I0127 20:18:57.920837 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3cb9730b-6fca-41ce-a6d8-9215c13b01e0-webhook-cert\") pod \"metallb-operator-controller-manager-77ddf8bbff-6skqs\" (UID: \"3cb9730b-6fca-41ce-a6d8-9215c13b01e0\") " pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs"
Jan 27 20:18:57 crc kubenswrapper[4793]: I0127 20:18:57.922107 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3cb9730b-6fca-41ce-a6d8-9215c13b01e0-apiservice-cert\") pod \"metallb-operator-controller-manager-77ddf8bbff-6skqs\" (UID: \"3cb9730b-6fca-41ce-a6d8-9215c13b01e0\") " pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs"
Jan 27 20:18:57 crc kubenswrapper[4793]: I0127 20:18:57.922481 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwb8t\" (UniqueName: \"kubernetes.io/projected/3cb9730b-6fca-41ce-a6d8-9215c13b01e0-kube-api-access-xwb8t\") pod \"metallb-operator-controller-manager-77ddf8bbff-6skqs\" (UID: \"3cb9730b-6fca-41ce-a6d8-9215c13b01e0\") " pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.024395 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3cb9730b-6fca-41ce-a6d8-9215c13b01e0-apiservice-cert\") pod \"metallb-operator-controller-manager-77ddf8bbff-6skqs\" (UID: \"3cb9730b-6fca-41ce-a6d8-9215c13b01e0\") " pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.024456 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwb8t\" (UniqueName: \"kubernetes.io/projected/3cb9730b-6fca-41ce-a6d8-9215c13b01e0-kube-api-access-xwb8t\") pod \"metallb-operator-controller-manager-77ddf8bbff-6skqs\" (UID: \"3cb9730b-6fca-41ce-a6d8-9215c13b01e0\") " pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.024582 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3cb9730b-6fca-41ce-a6d8-9215c13b01e0-webhook-cert\") pod \"metallb-operator-controller-manager-77ddf8bbff-6skqs\" (UID: \"3cb9730b-6fca-41ce-a6d8-9215c13b01e0\") " pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.031928 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3cb9730b-6fca-41ce-a6d8-9215c13b01e0-webhook-cert\") pod \"metallb-operator-controller-manager-77ddf8bbff-6skqs\" (UID: \"3cb9730b-6fca-41ce-a6d8-9215c13b01e0\") " pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.036999 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3cb9730b-6fca-41ce-a6d8-9215c13b01e0-apiservice-cert\") pod \"metallb-operator-controller-manager-77ddf8bbff-6skqs\" (UID: \"3cb9730b-6fca-41ce-a6d8-9215c13b01e0\") " pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.047563 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwb8t\" (UniqueName: \"kubernetes.io/projected/3cb9730b-6fca-41ce-a6d8-9215c13b01e0-kube-api-access-xwb8t\") pod \"metallb-operator-controller-manager-77ddf8bbff-6skqs\" (UID: \"3cb9730b-6fca-41ce-a6d8-9215c13b01e0\") " pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.130159 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.247122 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7"]
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.247858 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.250829 4793 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.250980 4793 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-mrkfk"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.251111 4793 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.266615 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7"]
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.443799 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/016b5dea-69f7-4abf-8e8a-72adc4922be9-apiservice-cert\") pod \"metallb-operator-webhook-server-8fb588c74-mjlw7\" (UID: \"016b5dea-69f7-4abf-8e8a-72adc4922be9\") " pod="metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.443881 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-td65n\" (UniqueName: \"kubernetes.io/projected/016b5dea-69f7-4abf-8e8a-72adc4922be9-kube-api-access-td65n\") pod \"metallb-operator-webhook-server-8fb588c74-mjlw7\" (UID: \"016b5dea-69f7-4abf-8e8a-72adc4922be9\") " pod="metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.443959 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/016b5dea-69f7-4abf-8e8a-72adc4922be9-webhook-cert\") pod \"metallb-operator-webhook-server-8fb588c74-mjlw7\" (UID: \"016b5dea-69f7-4abf-8e8a-72adc4922be9\") " pod="metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.545482 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/016b5dea-69f7-4abf-8e8a-72adc4922be9-apiservice-cert\") pod \"metallb-operator-webhook-server-8fb588c74-mjlw7\" (UID: \"016b5dea-69f7-4abf-8e8a-72adc4922be9\") " pod="metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.545974 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-td65n\" (UniqueName: \"kubernetes.io/projected/016b5dea-69f7-4abf-8e8a-72adc4922be9-kube-api-access-td65n\") pod \"metallb-operator-webhook-server-8fb588c74-mjlw7\" (UID: \"016b5dea-69f7-4abf-8e8a-72adc4922be9\") " pod="metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.546071 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/016b5dea-69f7-4abf-8e8a-72adc4922be9-webhook-cert\") pod \"metallb-operator-webhook-server-8fb588c74-mjlw7\" (UID: \"016b5dea-69f7-4abf-8e8a-72adc4922be9\") " pod="metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.551171 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/016b5dea-69f7-4abf-8e8a-72adc4922be9-webhook-cert\") pod \"metallb-operator-webhook-server-8fb588c74-mjlw7\" (UID: \"016b5dea-69f7-4abf-8e8a-72adc4922be9\") " pod="metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.552285 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/016b5dea-69f7-4abf-8e8a-72adc4922be9-apiservice-cert\") pod \"metallb-operator-webhook-server-8fb588c74-mjlw7\" (UID: \"016b5dea-69f7-4abf-8e8a-72adc4922be9\") " pod="metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.585360 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-td65n\" (UniqueName: \"kubernetes.io/projected/016b5dea-69f7-4abf-8e8a-72adc4922be9-kube-api-access-td65n\") pod \"metallb-operator-webhook-server-8fb588c74-mjlw7\" (UID: \"016b5dea-69f7-4abf-8e8a-72adc4922be9\") " pod="metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7"
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.710138 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9jt2" event={"ID":"6083fd40-42bf-4490-990f-bcfea8017677","Type":"ContainerStarted","Data":"96cba86ff21055eba53cd43c1e736451647f4687d43ce4126c9bdc88a03aca33"}
Jan 27 20:18:58 crc kubenswrapper[4793]: I0127 20:18:58.721884 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7"
Jan 27 20:18:59 crc kubenswrapper[4793]: I0127 20:18:59.011382 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r9jt2" podStartSLOduration=2.630550489 podStartE2EDuration="5.011359375s" podCreationTimestamp="2026-01-27 20:18:54 +0000 UTC" firstStartedPulling="2026-01-27 20:18:55.709685263 +0000 UTC m=+961.099938419" lastFinishedPulling="2026-01-27 20:18:58.090494149 +0000 UTC m=+963.480747305" observedRunningTime="2026-01-27 20:18:58.780672905 +0000 UTC m=+964.170926061" watchObservedRunningTime="2026-01-27 20:18:59.011359375 +0000 UTC m=+964.401612541"
Jan 27 20:18:59 crc kubenswrapper[4793]: I0127 20:18:59.016909 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs"]
Jan 27 20:18:59 crc kubenswrapper[4793]: W0127 20:18:59.025406 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3cb9730b_6fca_41ce_a6d8_9215c13b01e0.slice/crio-d7953c732469ef6c12df370c84bd1b5697b22b67a7be1075531961ffdd5b6989 WatchSource:0}: Error finding container d7953c732469ef6c12df370c84bd1b5697b22b67a7be1075531961ffdd5b6989: Status 404 returned error can't find the container with id d7953c732469ef6c12df370c84bd1b5697b22b67a7be1075531961ffdd5b6989
Jan 27 20:18:59 crc kubenswrapper[4793]: I0127 20:18:59.509984 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7"]
Jan 27 20:18:59 crc kubenswrapper[4793]: W0127 20:18:59.514783 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod016b5dea_69f7_4abf_8e8a_72adc4922be9.slice/crio-f46824a6cd1073a9b150d2d286a231a8ee512bb26b60d4e3bbb4d8369e382ae1 WatchSource:0}: Error finding container f46824a6cd1073a9b150d2d286a231a8ee512bb26b60d4e3bbb4d8369e382ae1: Status 404 returned error can't find the container with id f46824a6cd1073a9b150d2d286a231a8ee512bb26b60d4e3bbb4d8369e382ae1
Jan 27 20:18:59 crc kubenswrapper[4793]: I0127 20:18:59.717974 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs" event={"ID":"3cb9730b-6fca-41ce-a6d8-9215c13b01e0","Type":"ContainerStarted","Data":"d7953c732469ef6c12df370c84bd1b5697b22b67a7be1075531961ffdd5b6989"}
Jan 27 20:18:59 crc kubenswrapper[4793]: I0127 20:18:59.720933 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7" event={"ID":"016b5dea-69f7-4abf-8e8a-72adc4922be9","Type":"ContainerStarted","Data":"f46824a6cd1073a9b150d2d286a231a8ee512bb26b60d4e3bbb4d8369e382ae1"}
Jan 27 20:19:04 crc kubenswrapper[4793]: I0127 20:19:04.770764 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r9jt2"
Jan 27 20:19:04 crc kubenswrapper[4793]: I0127 20:19:04.771119 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r9jt2"
Jan 27 20:19:04 crc kubenswrapper[4793]: I0127 20:19:04.833612 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r9jt2"
Jan 27 20:19:04 crc kubenswrapper[4793]: I0127 20:19:04.946756 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r9jt2"
Jan 27 20:19:05 crc kubenswrapper[4793]: I0127 20:19:05.085130 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r9jt2"]
Jan 27 20:19:06 crc kubenswrapper[4793]: I0127 20:19:06.896118 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r9jt2" podUID="6083fd40-42bf-4490-990f-bcfea8017677" containerName="registry-server" containerID="cri-o://96cba86ff21055eba53cd43c1e736451647f4687d43ce4126c9bdc88a03aca33" gracePeriod=2
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.336858 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r9jt2"
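The "Killing container with a grace period" entry above (gracePeriod=2) reflects the usual two-phase stop: ask the process to exit cleanly, then force-kill once the grace period lapses. A minimal Go sketch of that pattern against a local process; killWithGrace is a hypothetical helper for illustration, since the kubelet itself delegates the actual kill to the CRI runtime (cri-o here):

    package main

    import (
    	"fmt"
    	"os/exec"
    	"syscall"
    	"time"
    )

    // killWithGrace sends SIGTERM, waits up to the grace period for a clean
    // exit, and falls back to SIGKILL, mirroring gracePeriod=2 in the log.
    func killWithGrace(cmd *exec.Cmd, grace time.Duration) error {
    	if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
    		return err
    	}
    	done := make(chan error, 1)
    	go func() { done <- cmd.Wait() }()
    	select {
    	case err := <-done:
    		return err // exited within the grace period
    	case <-time.After(grace):
    		_ = cmd.Process.Kill() // grace expired: force-kill
    		return fmt.Errorf("killed after %s grace period", grace)
    	}
    }

    func main() {
    	cmd := exec.Command("sleep", "60")
    	if err := cmd.Start(); err != nil {
    		panic(err)
    	}
    	fmt.Println(killWithGrace(cmd, 2*time.Second))
    }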
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.432483 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6083fd40-42bf-4490-990f-bcfea8017677-catalog-content\") pod \"6083fd40-42bf-4490-990f-bcfea8017677\" (UID: \"6083fd40-42bf-4490-990f-bcfea8017677\") "
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.432586 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qjctt\" (UniqueName: \"kubernetes.io/projected/6083fd40-42bf-4490-990f-bcfea8017677-kube-api-access-qjctt\") pod \"6083fd40-42bf-4490-990f-bcfea8017677\" (UID: \"6083fd40-42bf-4490-990f-bcfea8017677\") "
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.432686 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6083fd40-42bf-4490-990f-bcfea8017677-utilities\") pod \"6083fd40-42bf-4490-990f-bcfea8017677\" (UID: \"6083fd40-42bf-4490-990f-bcfea8017677\") "
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.433434 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6083fd40-42bf-4490-990f-bcfea8017677-utilities" (OuterVolumeSpecName: "utilities") pod "6083fd40-42bf-4490-990f-bcfea8017677" (UID: "6083fd40-42bf-4490-990f-bcfea8017677"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.449753 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6083fd40-42bf-4490-990f-bcfea8017677-kube-api-access-qjctt" (OuterVolumeSpecName: "kube-api-access-qjctt") pod "6083fd40-42bf-4490-990f-bcfea8017677" (UID: "6083fd40-42bf-4490-990f-bcfea8017677"). InnerVolumeSpecName "kube-api-access-qjctt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.534045 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6083fd40-42bf-4490-990f-bcfea8017677-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.534085 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qjctt\" (UniqueName: \"kubernetes.io/projected/6083fd40-42bf-4490-990f-bcfea8017677-kube-api-access-qjctt\") on node \"crc\" DevicePath \"\""
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.826913 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6083fd40-42bf-4490-990f-bcfea8017677-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6083fd40-42bf-4490-990f-bcfea8017677" (UID: "6083fd40-42bf-4490-990f-bcfea8017677"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.837231 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6083fd40-42bf-4490-990f-bcfea8017677-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.901905 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7" event={"ID":"016b5dea-69f7-4abf-8e8a-72adc4922be9","Type":"ContainerStarted","Data":"eb651dad1598e19d5a2997179119d437a81d79c6b9388f648d7cbaf4aad8d447"}
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.902834 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7"
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.903135 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs" event={"ID":"3cb9730b-6fca-41ce-a6d8-9215c13b01e0","Type":"ContainerStarted","Data":"c8df7ac404bd48a0dea8adad1102aaee4075b4773a35fdf29ed4f657fcd0ea4d"}
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.903235 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs"
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.904714 4793 generic.go:334] "Generic (PLEG): container finished" podID="6083fd40-42bf-4490-990f-bcfea8017677" containerID="96cba86ff21055eba53cd43c1e736451647f4687d43ce4126c9bdc88a03aca33" exitCode=0
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.904742 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9jt2" event={"ID":"6083fd40-42bf-4490-990f-bcfea8017677","Type":"ContainerDied","Data":"96cba86ff21055eba53cd43c1e736451647f4687d43ce4126c9bdc88a03aca33"}
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.904759 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9jt2" event={"ID":"6083fd40-42bf-4490-990f-bcfea8017677","Type":"ContainerDied","Data":"ceec88664fe2ed613ddc69a991b43bfa2a0d467207551571002e01b6729f915a"}
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.904775 4793 scope.go:117] "RemoveContainer" containerID="96cba86ff21055eba53cd43c1e736451647f4687d43ce4126c9bdc88a03aca33"
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.904792 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r9jt2"
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.923106 4793 scope.go:117] "RemoveContainer" containerID="b700d1ceda49ae61d505bd3d7d24aacb142e1fc544a25736a231e9016ce21026"
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.930663 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7" podStartSLOduration=2.498376182 podStartE2EDuration="9.930648458s" podCreationTimestamp="2026-01-27 20:18:58 +0000 UTC" firstStartedPulling="2026-01-27 20:18:59.520597335 +0000 UTC m=+964.910850491" lastFinishedPulling="2026-01-27 20:19:06.952869611 +0000 UTC m=+972.343122767" observedRunningTime="2026-01-27 20:19:07.930228138 +0000 UTC m=+973.320481294" watchObservedRunningTime="2026-01-27 20:19:07.930648458 +0000 UTC m=+973.320901614"
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.950349 4793 scope.go:117] "RemoveContainer" containerID="609e13dae101d95ae21ea095ac922e7d6225043fc8d8f29c232918b598ac09dd"
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.954906 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs" podStartSLOduration=3.0596366 podStartE2EDuration="10.954887066s" podCreationTimestamp="2026-01-27 20:18:57 +0000 UTC" firstStartedPulling="2026-01-27 20:18:59.035928241 +0000 UTC m=+964.426181397" lastFinishedPulling="2026-01-27 20:19:06.931178707 +0000 UTC m=+972.321431863" observedRunningTime="2026-01-27 20:19:07.95140174 +0000 UTC m=+973.341654896" watchObservedRunningTime="2026-01-27 20:19:07.954887066 +0000 UTC m=+973.345140232"
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.964499 4793 scope.go:117] "RemoveContainer" containerID="96cba86ff21055eba53cd43c1e736451647f4687d43ce4126c9bdc88a03aca33"
Jan 27 20:19:07 crc kubenswrapper[4793]: E0127 20:19:07.964976 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96cba86ff21055eba53cd43c1e736451647f4687d43ce4126c9bdc88a03aca33\": container with ID starting with 96cba86ff21055eba53cd43c1e736451647f4687d43ce4126c9bdc88a03aca33 not found: ID does not exist" containerID="96cba86ff21055eba53cd43c1e736451647f4687d43ce4126c9bdc88a03aca33"
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.965086 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96cba86ff21055eba53cd43c1e736451647f4687d43ce4126c9bdc88a03aca33"} err="failed to get container status \"96cba86ff21055eba53cd43c1e736451647f4687d43ce4126c9bdc88a03aca33\": rpc error: code = NotFound desc = could not find container \"96cba86ff21055eba53cd43c1e736451647f4687d43ce4126c9bdc88a03aca33\": container with ID starting with 96cba86ff21055eba53cd43c1e736451647f4687d43ce4126c9bdc88a03aca33 not found: ID does not exist"
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.965203 4793 scope.go:117] "RemoveContainer" containerID="b700d1ceda49ae61d505bd3d7d24aacb142e1fc544a25736a231e9016ce21026"
Jan 27 20:19:07 crc kubenswrapper[4793]: E0127 20:19:07.965585 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b700d1ceda49ae61d505bd3d7d24aacb142e1fc544a25736a231e9016ce21026\": container with ID starting with b700d1ceda49ae61d505bd3d7d24aacb142e1fc544a25736a231e9016ce21026 not found: ID does not exist" containerID="b700d1ceda49ae61d505bd3d7d24aacb142e1fc544a25736a231e9016ce21026"
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.965636 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b700d1ceda49ae61d505bd3d7d24aacb142e1fc544a25736a231e9016ce21026"} err="failed to get container status \"b700d1ceda49ae61d505bd3d7d24aacb142e1fc544a25736a231e9016ce21026\": rpc error: code = NotFound desc = could not find container \"b700d1ceda49ae61d505bd3d7d24aacb142e1fc544a25736a231e9016ce21026\": container with ID starting with b700d1ceda49ae61d505bd3d7d24aacb142e1fc544a25736a231e9016ce21026 not found: ID does not exist"
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.965673 4793 scope.go:117] "RemoveContainer" containerID="609e13dae101d95ae21ea095ac922e7d6225043fc8d8f29c232918b598ac09dd"
Jan 27 20:19:07 crc kubenswrapper[4793]: E0127 20:19:07.965970 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"609e13dae101d95ae21ea095ac922e7d6225043fc8d8f29c232918b598ac09dd\": container with ID starting with 609e13dae101d95ae21ea095ac922e7d6225043fc8d8f29c232918b598ac09dd not found: ID does not exist" containerID="609e13dae101d95ae21ea095ac922e7d6225043fc8d8f29c232918b598ac09dd"
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.966071 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"609e13dae101d95ae21ea095ac922e7d6225043fc8d8f29c232918b598ac09dd"} err="failed to get container status \"609e13dae101d95ae21ea095ac922e7d6225043fc8d8f29c232918b598ac09dd\": rpc error: code = NotFound desc = could not find container \"609e13dae101d95ae21ea095ac922e7d6225043fc8d8f29c232918b598ac09dd\": container with ID starting with 609e13dae101d95ae21ea095ac922e7d6225043fc8d8f29c232918b598ac09dd not found: ID does not exist"
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.974596 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r9jt2"]
Jan 27 20:19:07 crc kubenswrapper[4793]: I0127 20:19:07.980128 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r9jt2"]
Jan 27 20:19:09 crc kubenswrapper[4793]: I0127 20:19:09.810386 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6083fd40-42bf-4490-990f-bcfea8017677" path="/var/lib/kubelet/pods/6083fd40-42bf-4490-990f-bcfea8017677/volumes"
Jan 27 20:19:18 crc kubenswrapper[4793]: I0127 20:19:18.731515 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-8fb588c74-mjlw7"
Jan 27 20:19:22 crc kubenswrapper[4793]: I0127 20:19:22.753858 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 20:19:22 crc kubenswrapper[4793]: I0127 20:19:22.755147 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 20:19:38 crc kubenswrapper[4793]: I0127 20:19:38.132724 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs"
(probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-77ddf8bbff-6skqs" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.086585 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-j6cjw"] Jan 27 20:19:39 crc kubenswrapper[4793]: E0127 20:19:39.087182 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6083fd40-42bf-4490-990f-bcfea8017677" containerName="extract-utilities" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.087200 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6083fd40-42bf-4490-990f-bcfea8017677" containerName="extract-utilities" Jan 27 20:19:39 crc kubenswrapper[4793]: E0127 20:19:39.087217 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6083fd40-42bf-4490-990f-bcfea8017677" containerName="registry-server" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.087225 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6083fd40-42bf-4490-990f-bcfea8017677" containerName="registry-server" Jan 27 20:19:39 crc kubenswrapper[4793]: E0127 20:19:39.087240 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6083fd40-42bf-4490-990f-bcfea8017677" containerName="extract-content" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.087247 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6083fd40-42bf-4490-990f-bcfea8017677" containerName="extract-content" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.087384 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="6083fd40-42bf-4490-990f-bcfea8017677" containerName="registry-server" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.087927 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-j6cjw" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.090201 4793 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.091615 4793 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-dnw4s" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.101627 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-9wlsd"] Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.110270 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-j6cjw"] Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.110383 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.113220 4793 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.113406 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.177051 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-rzvg6"] Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.178228 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-rzvg6" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.180365 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.180369 4793 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.180884 4793 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-jm472" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.181160 4793 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.191346 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-r9hdv"] Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.193385 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-r9hdv" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.195149 4793 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.225601 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-r9hdv"] Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.228651 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6cdd56cd-76a3-41b7-8ed8-4446462605e3-metrics-certs\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.228726 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zlwf\" (UniqueName: \"kubernetes.io/projected/6cdd56cd-76a3-41b7-8ed8-4446462605e3-kube-api-access-2zlwf\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.228846 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-metrics-certs\") pod \"speaker-rzvg6\" (UID: \"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e\") " pod="metallb-system/speaker-rzvg6" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.228876 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-metallb-excludel2\") pod \"speaker-rzvg6\" (UID: \"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e\") " pod="metallb-system/speaker-rzvg6" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.228905 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-memberlist\") pod \"speaker-rzvg6\" (UID: \"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e\") " pod="metallb-system/speaker-rzvg6" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.228941 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcs64\" (UniqueName: 
\"kubernetes.io/projected/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-kube-api-access-qcs64\") pod \"speaker-rzvg6\" (UID: \"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e\") " pod="metallb-system/speaker-rzvg6" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.228986 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/6cdd56cd-76a3-41b7-8ed8-4446462605e3-frr-startup\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.229041 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/6cdd56cd-76a3-41b7-8ed8-4446462605e3-reloader\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.229071 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a694ab3e-9452-4d89-aad5-7dca775c9481-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-j6cjw\" (UID: \"a694ab3e-9452-4d89-aad5-7dca775c9481\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-j6cjw" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.229109 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/6cdd56cd-76a3-41b7-8ed8-4446462605e3-frr-sockets\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.229141 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p69p6\" (UniqueName: \"kubernetes.io/projected/a694ab3e-9452-4d89-aad5-7dca775c9481-kube-api-access-p69p6\") pod \"frr-k8s-webhook-server-7df86c4f6c-j6cjw\" (UID: \"a694ab3e-9452-4d89-aad5-7dca775c9481\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-j6cjw" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.229183 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/6cdd56cd-76a3-41b7-8ed8-4446462605e3-frr-conf\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.229217 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/6cdd56cd-76a3-41b7-8ed8-4446462605e3-metrics\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.330766 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5977e87a-93dd-4494-a4f7-cdc151fae6f4-cert\") pod \"controller-6968d8fdc4-r9hdv\" (UID: \"5977e87a-93dd-4494-a4f7-cdc151fae6f4\") " pod="metallb-system/controller-6968d8fdc4-r9hdv" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.330851 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/5977e87a-93dd-4494-a4f7-cdc151fae6f4-metrics-certs\") pod \"controller-6968d8fdc4-r9hdv\" (UID: \"5977e87a-93dd-4494-a4f7-cdc151fae6f4\") " pod="metallb-system/controller-6968d8fdc4-r9hdv" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.330889 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-metrics-certs\") pod \"speaker-rzvg6\" (UID: \"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e\") " pod="metallb-system/speaker-rzvg6" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.330917 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-metallb-excludel2\") pod \"speaker-rzvg6\" (UID: \"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e\") " pod="metallb-system/speaker-rzvg6" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.330941 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-memberlist\") pod \"speaker-rzvg6\" (UID: \"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e\") " pod="metallb-system/speaker-rzvg6" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.330968 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcs64\" (UniqueName: \"kubernetes.io/projected/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-kube-api-access-qcs64\") pod \"speaker-rzvg6\" (UID: \"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e\") " pod="metallb-system/speaker-rzvg6" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.330991 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/6cdd56cd-76a3-41b7-8ed8-4446462605e3-frr-startup\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.331020 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cp5jr\" (UniqueName: \"kubernetes.io/projected/5977e87a-93dd-4494-a4f7-cdc151fae6f4-kube-api-access-cp5jr\") pod \"controller-6968d8fdc4-r9hdv\" (UID: \"5977e87a-93dd-4494-a4f7-cdc151fae6f4\") " pod="metallb-system/controller-6968d8fdc4-r9hdv" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.331053 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/6cdd56cd-76a3-41b7-8ed8-4446462605e3-reloader\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.331079 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a694ab3e-9452-4d89-aad5-7dca775c9481-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-j6cjw\" (UID: \"a694ab3e-9452-4d89-aad5-7dca775c9481\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-j6cjw" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.331106 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/6cdd56cd-76a3-41b7-8ed8-4446462605e3-frr-sockets\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " 
pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.331126 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p69p6\" (UniqueName: \"kubernetes.io/projected/a694ab3e-9452-4d89-aad5-7dca775c9481-kube-api-access-p69p6\") pod \"frr-k8s-webhook-server-7df86c4f6c-j6cjw\" (UID: \"a694ab3e-9452-4d89-aad5-7dca775c9481\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-j6cjw" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.331155 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/6cdd56cd-76a3-41b7-8ed8-4446462605e3-frr-conf\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.331192 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/6cdd56cd-76a3-41b7-8ed8-4446462605e3-metrics\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.331235 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6cdd56cd-76a3-41b7-8ed8-4446462605e3-metrics-certs\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.331259 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zlwf\" (UniqueName: \"kubernetes.io/projected/6cdd56cd-76a3-41b7-8ed8-4446462605e3-kube-api-access-2zlwf\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: E0127 20:19:39.331778 4793 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 27 20:19:39 crc kubenswrapper[4793]: E0127 20:19:39.331838 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-memberlist podName:90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e nodeName:}" failed. No retries permitted until 2026-01-27 20:19:39.831820909 +0000 UTC m=+1005.222074065 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-memberlist") pod "speaker-rzvg6" (UID: "90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e") : secret "metallb-memberlist" not found Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.332127 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-metallb-excludel2\") pod \"speaker-rzvg6\" (UID: \"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e\") " pod="metallb-system/speaker-rzvg6" Jan 27 20:19:39 crc kubenswrapper[4793]: E0127 20:19:39.332179 4793 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Jan 27 20:19:39 crc kubenswrapper[4793]: E0127 20:19:39.332321 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a694ab3e-9452-4d89-aad5-7dca775c9481-cert podName:a694ab3e-9452-4d89-aad5-7dca775c9481 nodeName:}" failed. 
No retries permitted until 2026-01-27 20:19:39.832305911 +0000 UTC m=+1005.222559067 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a694ab3e-9452-4d89-aad5-7dca775c9481-cert") pod "frr-k8s-webhook-server-7df86c4f6c-j6cjw" (UID: "a694ab3e-9452-4d89-aad5-7dca775c9481") : secret "frr-k8s-webhook-server-cert" not found Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.332385 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/6cdd56cd-76a3-41b7-8ed8-4446462605e3-frr-sockets\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.332475 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/6cdd56cd-76a3-41b7-8ed8-4446462605e3-metrics\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.332919 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/6cdd56cd-76a3-41b7-8ed8-4446462605e3-frr-conf\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.333066 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/6cdd56cd-76a3-41b7-8ed8-4446462605e3-reloader\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.333422 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/6cdd56cd-76a3-41b7-8ed8-4446462605e3-frr-startup\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.337260 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-metrics-certs\") pod \"speaker-rzvg6\" (UID: \"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e\") " pod="metallb-system/speaker-rzvg6" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.355231 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6cdd56cd-76a3-41b7-8ed8-4446462605e3-metrics-certs\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.355602 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcs64\" (UniqueName: \"kubernetes.io/projected/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-kube-api-access-qcs64\") pod \"speaker-rzvg6\" (UID: \"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e\") " pod="metallb-system/speaker-rzvg6" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.357017 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p69p6\" (UniqueName: \"kubernetes.io/projected/a694ab3e-9452-4d89-aad5-7dca775c9481-kube-api-access-p69p6\") pod \"frr-k8s-webhook-server-7df86c4f6c-j6cjw\" (UID: \"a694ab3e-9452-4d89-aad5-7dca775c9481\") " 
pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-j6cjw" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.359203 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zlwf\" (UniqueName: \"kubernetes.io/projected/6cdd56cd-76a3-41b7-8ed8-4446462605e3-kube-api-access-2zlwf\") pod \"frr-k8s-9wlsd\" (UID: \"6cdd56cd-76a3-41b7-8ed8-4446462605e3\") " pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.432398 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5977e87a-93dd-4494-a4f7-cdc151fae6f4-cert\") pod \"controller-6968d8fdc4-r9hdv\" (UID: \"5977e87a-93dd-4494-a4f7-cdc151fae6f4\") " pod="metallb-system/controller-6968d8fdc4-r9hdv" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.432461 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5977e87a-93dd-4494-a4f7-cdc151fae6f4-metrics-certs\") pod \"controller-6968d8fdc4-r9hdv\" (UID: \"5977e87a-93dd-4494-a4f7-cdc151fae6f4\") " pod="metallb-system/controller-6968d8fdc4-r9hdv" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.432511 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cp5jr\" (UniqueName: \"kubernetes.io/projected/5977e87a-93dd-4494-a4f7-cdc151fae6f4-kube-api-access-cp5jr\") pod \"controller-6968d8fdc4-r9hdv\" (UID: \"5977e87a-93dd-4494-a4f7-cdc151fae6f4\") " pod="metallb-system/controller-6968d8fdc4-r9hdv" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.432716 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.439850 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5977e87a-93dd-4494-a4f7-cdc151fae6f4-cert\") pod \"controller-6968d8fdc4-r9hdv\" (UID: \"5977e87a-93dd-4494-a4f7-cdc151fae6f4\") " pod="metallb-system/controller-6968d8fdc4-r9hdv" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.439850 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5977e87a-93dd-4494-a4f7-cdc151fae6f4-metrics-certs\") pod \"controller-6968d8fdc4-r9hdv\" (UID: \"5977e87a-93dd-4494-a4f7-cdc151fae6f4\") " pod="metallb-system/controller-6968d8fdc4-r9hdv" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.449395 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cp5jr\" (UniqueName: \"kubernetes.io/projected/5977e87a-93dd-4494-a4f7-cdc151fae6f4-kube-api-access-cp5jr\") pod \"controller-6968d8fdc4-r9hdv\" (UID: \"5977e87a-93dd-4494-a4f7-cdc151fae6f4\") " pod="metallb-system/controller-6968d8fdc4-r9hdv" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.517119 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-r9hdv" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.837325 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-memberlist\") pod \"speaker-rzvg6\" (UID: \"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e\") " pod="metallb-system/speaker-rzvg6" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.837671 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a694ab3e-9452-4d89-aad5-7dca775c9481-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-j6cjw\" (UID: \"a694ab3e-9452-4d89-aad5-7dca775c9481\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-j6cjw" Jan 27 20:19:39 crc kubenswrapper[4793]: E0127 20:19:39.837520 4793 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 27 20:19:39 crc kubenswrapper[4793]: E0127 20:19:39.837824 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-memberlist podName:90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e nodeName:}" failed. No retries permitted until 2026-01-27 20:19:40.837799469 +0000 UTC m=+1006.228052785 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-memberlist") pod "speaker-rzvg6" (UID: "90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e") : secret "metallb-memberlist" not found Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.844391 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a694ab3e-9452-4d89-aad5-7dca775c9481-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-j6cjw\" (UID: \"a694ab3e-9452-4d89-aad5-7dca775c9481\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-j6cjw" Jan 27 20:19:39 crc kubenswrapper[4793]: I0127 20:19:39.922077 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-r9hdv"] Jan 27 20:19:39 crc kubenswrapper[4793]: W0127 20:19:39.925162 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5977e87a_93dd_4494_a4f7_cdc151fae6f4.slice/crio-8fecf1ed93bbca021413cdba31a4e161f9ee8d8ef22210119e59d2fe16b7b75c WatchSource:0}: Error finding container 8fecf1ed93bbca021413cdba31a4e161f9ee8d8ef22210119e59d2fe16b7b75c: Status 404 returned error can't find the container with id 8fecf1ed93bbca021413cdba31a4e161f9ee8d8ef22210119e59d2fe16b7b75c Jan 27 20:19:40 crc kubenswrapper[4793]: I0127 20:19:40.015163 4793 util.go:30] "No sandbox for pod can be found. 
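The two failed mounts of the "memberlist" secret above trace the reconciler's retry schedule: durationBeforeRetry starts at 500ms and doubles to 1s on the next failure, until the secret exists and MountVolume.SetUp finally succeeds (see 20:19:40.882430 below). A minimal sketch of such an exponential-backoff schedule; the 2x factor matches the logged delays, while the upper bound is an assumption, not taken from the log:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	delay := 500 * time.Millisecond // first durationBeforeRetry in the log
    	maxDelay := 2 * time.Minute     // assumed cap, not taken from the log

    	for attempt := 1; attempt <= 5; attempt++ {
    		fmt.Printf("attempt %d failed; no retries permitted for %s\n", attempt, delay)
    		delay *= 2 // 500ms -> 1s matches the two logged failures
    		if delay > maxDelay {
    			delay = maxDelay
    		}
    	}
    }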
Jan 27 20:19:40 crc kubenswrapper[4793]: I0127 20:19:40.110282 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-r9hdv" event={"ID":"5977e87a-93dd-4494-a4f7-cdc151fae6f4","Type":"ContainerStarted","Data":"92544e39a575f346927063a599abf45076752567a1e17b4295878de25d4fbad0"}
Jan 27 20:19:40 crc kubenswrapper[4793]: I0127 20:19:40.110327 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-r9hdv" event={"ID":"5977e87a-93dd-4494-a4f7-cdc151fae6f4","Type":"ContainerStarted","Data":"8fecf1ed93bbca021413cdba31a4e161f9ee8d8ef22210119e59d2fe16b7b75c"}
Jan 27 20:19:40 crc kubenswrapper[4793]: I0127 20:19:40.111018 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9wlsd" event={"ID":"6cdd56cd-76a3-41b7-8ed8-4446462605e3","Type":"ContainerStarted","Data":"e521f83d78fe666e7071f2b3ae23a09797f997218b9537175564e641a6376937"}
Jan 27 20:19:40 crc kubenswrapper[4793]: I0127 20:19:40.444382 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-j6cjw"]
Jan 27 20:19:40 crc kubenswrapper[4793]: I0127 20:19:40.854067 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-memberlist\") pod \"speaker-rzvg6\" (UID: \"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e\") " pod="metallb-system/speaker-rzvg6"
Jan 27 20:19:40 crc kubenswrapper[4793]: I0127 20:19:40.882430 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e-memberlist\") pod \"speaker-rzvg6\" (UID: \"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e\") " pod="metallb-system/speaker-rzvg6"
Jan 27 20:19:40 crc kubenswrapper[4793]: I0127 20:19:40.996797 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-rzvg6"
Jan 27 20:19:41 crc kubenswrapper[4793]: W0127 20:19:41.057239 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod90e6b2bf_caa8_4ac8_aa2b_4c9389a3666e.slice/crio-900e4870b1a7b07dc9edb126a30d20b975e3b953305cc693f3845cd0a5f0a87a WatchSource:0}: Error finding container 900e4870b1a7b07dc9edb126a30d20b975e3b953305cc693f3845cd0a5f0a87a: Status 404 returned error can't find the container with id 900e4870b1a7b07dc9edb126a30d20b975e3b953305cc693f3845cd0a5f0a87a
Jan 27 20:19:41 crc kubenswrapper[4793]: I0127 20:19:41.120389 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-r9hdv" event={"ID":"5977e87a-93dd-4494-a4f7-cdc151fae6f4","Type":"ContainerStarted","Data":"e0f33e0604896dfd5b8c203ccfc267eef553a44d6326a0d8cd6d38787846c4f8"}
Jan 27 20:19:41 crc kubenswrapper[4793]: I0127 20:19:41.120517 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-r9hdv"
Jan 27 20:19:41 crc kubenswrapper[4793]: I0127 20:19:41.121654 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-rzvg6" event={"ID":"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e","Type":"ContainerStarted","Data":"900e4870b1a7b07dc9edb126a30d20b975e3b953305cc693f3845cd0a5f0a87a"}
Jan 27 20:19:41 crc kubenswrapper[4793]: I0127 20:19:41.122539 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-j6cjw" event={"ID":"a694ab3e-9452-4d89-aad5-7dca775c9481","Type":"ContainerStarted","Data":"41262f3593f37ed8c0c23d714c1a3e29e9d8463605fc1e50ba530812f3ab0457"}
Jan 27 20:19:41 crc kubenswrapper[4793]: I0127 20:19:41.139614 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-r9hdv" podStartSLOduration=2.139584215 podStartE2EDuration="2.139584215s" podCreationTimestamp="2026-01-27 20:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:19:41.138534819 +0000 UTC m=+1006.528787975" watchObservedRunningTime="2026-01-27 20:19:41.139584215 +0000 UTC m=+1006.529837371"
Jan 27 20:19:42 crc kubenswrapper[4793]: I0127 20:19:42.133117 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-rzvg6" event={"ID":"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e","Type":"ContainerStarted","Data":"e4b26fa434796da2cd0bf25709e467795d5e4a781758fceeccb42589b2a31549"}
Jan 27 20:19:42 crc kubenswrapper[4793]: I0127 20:19:42.133423 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-rzvg6" event={"ID":"90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e","Type":"ContainerStarted","Data":"643543d2211bab88ce61cce842b1b48910f8e2a7aef5e843e37c95b3b94a6633"}
Jan 27 20:19:42 crc kubenswrapper[4793]: I0127 20:19:42.133453 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-rzvg6"
Jan 27 20:19:45 crc kubenswrapper[4793]: I0127 20:19:45.989314 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-rzvg6" podStartSLOduration=6.989296377 podStartE2EDuration="6.989296377s" podCreationTimestamp="2026-01-27 20:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:19:42.159889609 +0000 UTC m=+1007.550142775" watchObservedRunningTime="2026-01-27 20:19:45.989296377 +0000 UTC m=+1011.379549533"
Jan 27 20:19:51 crc kubenswrapper[4793]: I0127 20:19:51.219104 4793 generic.go:334] "Generic (PLEG): container finished" podID="6cdd56cd-76a3-41b7-8ed8-4446462605e3" containerID="8f7f1cd026c7c21f92f04d9369621bf04ce7859d7eca2864180610b8b3494152" exitCode=0
Jan 27 20:19:51 crc kubenswrapper[4793]: I0127 20:19:51.219160 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9wlsd" event={"ID":"6cdd56cd-76a3-41b7-8ed8-4446462605e3","Type":"ContainerDied","Data":"8f7f1cd026c7c21f92f04d9369621bf04ce7859d7eca2864180610b8b3494152"}
Jan 27 20:19:51 crc kubenswrapper[4793]: I0127 20:19:51.221601 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-j6cjw" event={"ID":"a694ab3e-9452-4d89-aad5-7dca775c9481","Type":"ContainerStarted","Data":"6856f45176d122ebdc5b5317e5909b42d00bad34a604921e05813c1d24cb9906"}
Jan 27 20:19:51 crc kubenswrapper[4793]: I0127 20:19:51.221922 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-j6cjw"
Jan 27 20:19:51 crc kubenswrapper[4793]: I0127 20:19:51.260474 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-j6cjw" podStartSLOduration=2.660615434 podStartE2EDuration="12.260450246s" podCreationTimestamp="2026-01-27 20:19:39 +0000 UTC" firstStartedPulling="2026-01-27 20:19:40.454264379 +0000 UTC m=+1005.844517535" lastFinishedPulling="2026-01-27 20:19:50.054099191 +0000 UTC m=+1015.444352347" observedRunningTime="2026-01-27 20:19:51.254161732 +0000 UTC m=+1016.644414898" watchObservedRunningTime="2026-01-27 20:19:51.260450246 +0000 UTC m=+1016.650703402"
Jan 27 20:19:52 crc kubenswrapper[4793]: I0127 20:19:52.228814 4793 generic.go:334] "Generic (PLEG): container finished" podID="6cdd56cd-76a3-41b7-8ed8-4446462605e3" containerID="b13e5a792f4b18222e9cb847150646fb253d4fbe3b622f706cb3bfee270bc3f7" exitCode=0
Jan 27 20:19:52 crc kubenswrapper[4793]: I0127 20:19:52.228921 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9wlsd" event={"ID":"6cdd56cd-76a3-41b7-8ed8-4446462605e3","Type":"ContainerDied","Data":"b13e5a792f4b18222e9cb847150646fb253d4fbe3b622f706cb3bfee270bc3f7"}
Jan 27 20:19:52 crc kubenswrapper[4793]: I0127 20:19:52.753342 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 20:19:52 crc kubenswrapper[4793]: I0127 20:19:52.753773 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 20:19:53 crc kubenswrapper[4793]: I0127 20:19:53.238039 4793 generic.go:334] "Generic (PLEG): container finished" podID="6cdd56cd-76a3-41b7-8ed8-4446462605e3" containerID="85fcff04477b5fa4b9e07811e5ab3454ffdff5af77ac06e13048f177cd6f24ce" exitCode=0
Jan 27 20:19:53 crc kubenswrapper[4793]: I0127 20:19:53.238091 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9wlsd" event={"ID":"6cdd56cd-76a3-41b7-8ed8-4446462605e3","Type":"ContainerDied","Data":"85fcff04477b5fa4b9e07811e5ab3454ffdff5af77ac06e13048f177cd6f24ce"}
Jan 27 20:19:54 crc kubenswrapper[4793]: I0127 20:19:54.322326 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9wlsd" event={"ID":"6cdd56cd-76a3-41b7-8ed8-4446462605e3","Type":"ContainerStarted","Data":"b4507c0cb7cbf2ac5d0ef1fa848e6b548279532fb87365e1c1f4ceec8c3870aa"}
Jan 27 20:19:54 crc kubenswrapper[4793]: I0127 20:19:54.322662 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9wlsd" event={"ID":"6cdd56cd-76a3-41b7-8ed8-4446462605e3","Type":"ContainerStarted","Data":"0a0345632587a2d28844a7fecef25e54496bc853b3562125ef4d224a91b64ddb"}
Jan 27 20:19:54 crc kubenswrapper[4793]: I0127 20:19:54.322677 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9wlsd" event={"ID":"6cdd56cd-76a3-41b7-8ed8-4446462605e3","Type":"ContainerStarted","Data":"852089214c260bf5cc4c9dd438b3079fb2125d6addb0c78f6979755b38208d3b"}
Jan 27 20:19:54 crc kubenswrapper[4793]: I0127 20:19:54.322687 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9wlsd" event={"ID":"6cdd56cd-76a3-41b7-8ed8-4446462605e3","Type":"ContainerStarted","Data":"af2f7b686c0acb162ab2f7f425dc2fd96a0d96342a636ce14110937a09a3d390"}
Jan 27 20:19:55 crc kubenswrapper[4793]: I0127 20:19:55.331630 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9wlsd" event={"ID":"6cdd56cd-76a3-41b7-8ed8-4446462605e3","Type":"ContainerStarted","Data":"a52453d082566b544e7c9880365e038599a5591467df9e76fe96646e7ffe91b8"}
Jan 27 20:19:55 crc kubenswrapper[4793]: I0127 20:19:55.331700 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9wlsd" event={"ID":"6cdd56cd-76a3-41b7-8ed8-4446462605e3","Type":"ContainerStarted","Data":"b2e0cc57c942f2798e3a2c283f8030702da141fddabe0dcfd89e91ad5ce21333"}
Jan 27 20:19:55 crc kubenswrapper[4793]: I0127 20:19:55.332993 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-9wlsd"
Jan 27 20:19:55 crc kubenswrapper[4793]: I0127 20:19:55.356697 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-9wlsd" podStartSLOduration=5.927770306 podStartE2EDuration="16.356679713s" podCreationTimestamp="2026-01-27 20:19:39 +0000 UTC" firstStartedPulling="2026-01-27 20:19:39.603968611 +0000 UTC m=+1004.994221767" lastFinishedPulling="2026-01-27 20:19:50.032878018 +0000 UTC m=+1015.423131174" observedRunningTime="2026-01-27 20:19:55.351782262 +0000 UTC m=+1020.742035448" watchObservedRunningTime="2026-01-27 20:19:55.356679713 +0000 UTC m=+1020.746932869"
Jan 27 20:19:59 crc kubenswrapper[4793]: I0127 20:19:59.433337 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-9wlsd"
Jan 27 20:19:59 crc kubenswrapper[4793]: I0127 20:19:59.470301 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-9wlsd"
Jan 27 20:19:59 crc kubenswrapper[4793]: I0127 20:19:59.521505 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-r9hdv"
Jan 27 20:20:00 crc kubenswrapper[4793]: I0127 20:20:00.030966 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-j6cjw"
Jan 27 20:20:01 crc kubenswrapper[4793]: I0127 20:20:01.001681 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-rzvg6"
20:20:01.001681 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-rzvg6" Jan 27 20:20:04 crc kubenswrapper[4793]: I0127 20:20:04.698107 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-h52ld"] Jan 27 20:20:04 crc kubenswrapper[4793]: I0127 20:20:04.700305 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-h52ld" Jan 27 20:20:04 crc kubenswrapper[4793]: I0127 20:20:04.704918 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 27 20:20:04 crc kubenswrapper[4793]: I0127 20:20:04.706798 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-v2l5b" Jan 27 20:20:04 crc kubenswrapper[4793]: I0127 20:20:04.707792 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 27 20:20:04 crc kubenswrapper[4793]: I0127 20:20:04.718038 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-h52ld"] Jan 27 20:20:04 crc kubenswrapper[4793]: I0127 20:20:04.957657 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4flxc\" (UniqueName: \"kubernetes.io/projected/57064c5d-f52e-487a-909e-af9671d7a027-kube-api-access-4flxc\") pod \"openstack-operator-index-h52ld\" (UID: \"57064c5d-f52e-487a-909e-af9671d7a027\") " pod="openstack-operators/openstack-operator-index-h52ld" Jan 27 20:20:05 crc kubenswrapper[4793]: I0127 20:20:05.059306 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4flxc\" (UniqueName: \"kubernetes.io/projected/57064c5d-f52e-487a-909e-af9671d7a027-kube-api-access-4flxc\") pod \"openstack-operator-index-h52ld\" (UID: \"57064c5d-f52e-487a-909e-af9671d7a027\") " pod="openstack-operators/openstack-operator-index-h52ld" Jan 27 20:20:05 crc kubenswrapper[4793]: I0127 20:20:05.079277 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4flxc\" (UniqueName: \"kubernetes.io/projected/57064c5d-f52e-487a-909e-af9671d7a027-kube-api-access-4flxc\") pod \"openstack-operator-index-h52ld\" (UID: \"57064c5d-f52e-487a-909e-af9671d7a027\") " pod="openstack-operators/openstack-operator-index-h52ld" Jan 27 20:20:05 crc kubenswrapper[4793]: I0127 20:20:05.318988 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-h52ld" Jan 27 20:20:05 crc kubenswrapper[4793]: I0127 20:20:05.595702 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-h52ld"] Jan 27 20:20:06 crc kubenswrapper[4793]: I0127 20:20:06.428715 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-h52ld" event={"ID":"57064c5d-f52e-487a-909e-af9671d7a027","Type":"ContainerStarted","Data":"02185be7aec212bc1a532c906409060ae7afd9407bf5f46adddb621a150e2201"} Jan 27 20:20:07 crc kubenswrapper[4793]: I0127 20:20:07.273058 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-h52ld"] Jan 27 20:20:08 crc kubenswrapper[4793]: I0127 20:20:08.075735 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-7ll2b"] Jan 27 20:20:08 crc kubenswrapper[4793]: I0127 20:20:08.076728 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-7ll2b" Jan 27 20:20:08 crc kubenswrapper[4793]: I0127 20:20:08.083715 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-7ll2b"] Jan 27 20:20:08 crc kubenswrapper[4793]: I0127 20:20:08.122441 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjlqh\" (UniqueName: \"kubernetes.io/projected/cff9b66d-6eb7-4fd6-8f40-32928a43df4d-kube-api-access-gjlqh\") pod \"openstack-operator-index-7ll2b\" (UID: \"cff9b66d-6eb7-4fd6-8f40-32928a43df4d\") " pod="openstack-operators/openstack-operator-index-7ll2b" Jan 27 20:20:08 crc kubenswrapper[4793]: I0127 20:20:08.223729 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjlqh\" (UniqueName: \"kubernetes.io/projected/cff9b66d-6eb7-4fd6-8f40-32928a43df4d-kube-api-access-gjlqh\") pod \"openstack-operator-index-7ll2b\" (UID: \"cff9b66d-6eb7-4fd6-8f40-32928a43df4d\") " pod="openstack-operators/openstack-operator-index-7ll2b" Jan 27 20:20:08 crc kubenswrapper[4793]: I0127 20:20:08.245768 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjlqh\" (UniqueName: \"kubernetes.io/projected/cff9b66d-6eb7-4fd6-8f40-32928a43df4d-kube-api-access-gjlqh\") pod \"openstack-operator-index-7ll2b\" (UID: \"cff9b66d-6eb7-4fd6-8f40-32928a43df4d\") " pod="openstack-operators/openstack-operator-index-7ll2b" Jan 27 20:20:08 crc kubenswrapper[4793]: I0127 20:20:08.399446 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-7ll2b" Jan 27 20:20:09 crc kubenswrapper[4793]: I0127 20:20:09.445420 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-9wlsd" Jan 27 20:20:10 crc kubenswrapper[4793]: I0127 20:20:10.443381 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-7ll2b"] Jan 27 20:20:10 crc kubenswrapper[4793]: I0127 20:20:10.459013 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-h52ld" event={"ID":"57064c5d-f52e-487a-909e-af9671d7a027","Type":"ContainerStarted","Data":"fc25b0893a449dfc544f33177007135f29ebc0096682aaa49f93e9d93254c217"} Jan 27 20:20:10 crc kubenswrapper[4793]: I0127 20:20:10.459282 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-h52ld" podUID="57064c5d-f52e-487a-909e-af9671d7a027" containerName="registry-server" containerID="cri-o://fc25b0893a449dfc544f33177007135f29ebc0096682aaa49f93e9d93254c217" gracePeriod=2 Jan 27 20:20:10 crc kubenswrapper[4793]: I0127 20:20:10.478989 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-h52ld" podStartSLOduration=2.005187225 podStartE2EDuration="6.478973271s" podCreationTimestamp="2026-01-27 20:20:04 +0000 UTC" firstStartedPulling="2026-01-27 20:20:05.605270878 +0000 UTC m=+1030.995524034" lastFinishedPulling="2026-01-27 20:20:10.079056914 +0000 UTC m=+1035.469310080" observedRunningTime="2026-01-27 20:20:10.476663055 +0000 UTC m=+1035.866916221" watchObservedRunningTime="2026-01-27 20:20:10.478973271 +0000 UTC m=+1035.869226427" Jan 27 20:20:10 crc kubenswrapper[4793]: I0127 20:20:10.842224 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-h52ld" Jan 27 20:20:10 crc kubenswrapper[4793]: I0127 20:20:10.929930 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4flxc\" (UniqueName: \"kubernetes.io/projected/57064c5d-f52e-487a-909e-af9671d7a027-kube-api-access-4flxc\") pod \"57064c5d-f52e-487a-909e-af9671d7a027\" (UID: \"57064c5d-f52e-487a-909e-af9671d7a027\") " Jan 27 20:20:11 crc kubenswrapper[4793]: I0127 20:20:11.043471 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57064c5d-f52e-487a-909e-af9671d7a027-kube-api-access-4flxc" (OuterVolumeSpecName: "kube-api-access-4flxc") pod "57064c5d-f52e-487a-909e-af9671d7a027" (UID: "57064c5d-f52e-487a-909e-af9671d7a027"). InnerVolumeSpecName "kube-api-access-4flxc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:20:11 crc kubenswrapper[4793]: I0127 20:20:11.139641 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4flxc\" (UniqueName: \"kubernetes.io/projected/57064c5d-f52e-487a-909e-af9671d7a027-kube-api-access-4flxc\") on node \"crc\" DevicePath \"\"" Jan 27 20:20:11 crc kubenswrapper[4793]: I0127 20:20:11.466791 4793 generic.go:334] "Generic (PLEG): container finished" podID="57064c5d-f52e-487a-909e-af9671d7a027" containerID="fc25b0893a449dfc544f33177007135f29ebc0096682aaa49f93e9d93254c217" exitCode=0 Jan 27 20:20:11 crc kubenswrapper[4793]: I0127 20:20:11.466837 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-h52ld" Jan 27 20:20:11 crc kubenswrapper[4793]: I0127 20:20:11.466858 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-h52ld" event={"ID":"57064c5d-f52e-487a-909e-af9671d7a027","Type":"ContainerDied","Data":"fc25b0893a449dfc544f33177007135f29ebc0096682aaa49f93e9d93254c217"} Jan 27 20:20:11 crc kubenswrapper[4793]: I0127 20:20:11.466882 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-h52ld" event={"ID":"57064c5d-f52e-487a-909e-af9671d7a027","Type":"ContainerDied","Data":"02185be7aec212bc1a532c906409060ae7afd9407bf5f46adddb621a150e2201"} Jan 27 20:20:11 crc kubenswrapper[4793]: I0127 20:20:11.466919 4793 scope.go:117] "RemoveContainer" containerID="fc25b0893a449dfc544f33177007135f29ebc0096682aaa49f93e9d93254c217" Jan 27 20:20:11 crc kubenswrapper[4793]: I0127 20:20:11.468156 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-7ll2b" event={"ID":"cff9b66d-6eb7-4fd6-8f40-32928a43df4d","Type":"ContainerStarted","Data":"753f32d4993509608b6446256b995f6c5462197ab28a8cf19833f4b670bf4a2f"} Jan 27 20:20:11 crc kubenswrapper[4793]: I0127 20:20:11.468187 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-7ll2b" event={"ID":"cff9b66d-6eb7-4fd6-8f40-32928a43df4d","Type":"ContainerStarted","Data":"d55eaa4b4d5f1f088a25c4741011461c796c40d83a47a411ce2ead57f5da90cd"} Jan 27 20:20:11 crc kubenswrapper[4793]: I0127 20:20:11.490517 4793 scope.go:117] "RemoveContainer" containerID="fc25b0893a449dfc544f33177007135f29ebc0096682aaa49f93e9d93254c217" Jan 27 20:20:11 crc kubenswrapper[4793]: E0127 20:20:11.493297 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc25b0893a449dfc544f33177007135f29ebc0096682aaa49f93e9d93254c217\": container with ID starting with fc25b0893a449dfc544f33177007135f29ebc0096682aaa49f93e9d93254c217 not found: ID does not exist" containerID="fc25b0893a449dfc544f33177007135f29ebc0096682aaa49f93e9d93254c217" Jan 27 20:20:11 crc kubenswrapper[4793]: I0127 20:20:11.493455 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc25b0893a449dfc544f33177007135f29ebc0096682aaa49f93e9d93254c217"} err="failed to get container status \"fc25b0893a449dfc544f33177007135f29ebc0096682aaa49f93e9d93254c217\": rpc error: code = NotFound desc = could not find container \"fc25b0893a449dfc544f33177007135f29ebc0096682aaa49f93e9d93254c217\": container with ID starting with fc25b0893a449dfc544f33177007135f29ebc0096682aaa49f93e9d93254c217 not found: ID does not exist" Jan 27 20:20:11 crc kubenswrapper[4793]: I0127 20:20:11.494508 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-7ll2b" podStartSLOduration=3.416818605 podStartE2EDuration="3.494489478s" podCreationTimestamp="2026-01-27 20:20:08 +0000 UTC" firstStartedPulling="2026-01-27 20:20:10.463781767 +0000 UTC m=+1035.854034923" lastFinishedPulling="2026-01-27 20:20:10.54145264 +0000 UTC m=+1035.931705796" observedRunningTime="2026-01-27 20:20:11.489522016 +0000 UTC m=+1036.879775172" watchObservedRunningTime="2026-01-27 20:20:11.494489478 +0000 UTC m=+1036.884742634" Jan 27 20:20:11 crc kubenswrapper[4793]: I0127 20:20:11.504849 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack-operators/openstack-operator-index-h52ld"] Jan 27 20:20:11 crc kubenswrapper[4793]: I0127 20:20:11.508814 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-h52ld"] Jan 27 20:20:11 crc kubenswrapper[4793]: I0127 20:20:11.811953 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57064c5d-f52e-487a-909e-af9671d7a027" path="/var/lib/kubelet/pods/57064c5d-f52e-487a-909e-af9671d7a027/volumes" Jan 27 20:20:18 crc kubenswrapper[4793]: I0127 20:20:18.400005 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-7ll2b" Jan 27 20:20:18 crc kubenswrapper[4793]: I0127 20:20:18.400488 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-7ll2b" Jan 27 20:20:18 crc kubenswrapper[4793]: I0127 20:20:18.440747 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-7ll2b" Jan 27 20:20:18 crc kubenswrapper[4793]: I0127 20:20:18.557256 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-7ll2b" Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.114398 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c"] Jan 27 20:20:21 crc kubenswrapper[4793]: E0127 20:20:21.115498 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57064c5d-f52e-487a-909e-af9671d7a027" containerName="registry-server" Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.115521 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="57064c5d-f52e-487a-909e-af9671d7a027" containerName="registry-server" Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.116506 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="57064c5d-f52e-487a-909e-af9671d7a027" containerName="registry-server" Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.117951 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.122317 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-4c98n" Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.122726 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c"] Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.230495 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtcdm\" (UniqueName: \"kubernetes.io/projected/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-kube-api-access-qtcdm\") pod \"6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c\" (UID: \"fab801dc-51a6-4937-9a1b-67eb5db6c0a9\") " pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.230657 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-util\") pod \"6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c\" (UID: \"fab801dc-51a6-4937-9a1b-67eb5db6c0a9\") " pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.230709 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-bundle\") pod \"6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c\" (UID: \"fab801dc-51a6-4937-9a1b-67eb5db6c0a9\") " pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.332510 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtcdm\" (UniqueName: \"kubernetes.io/projected/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-kube-api-access-qtcdm\") pod \"6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c\" (UID: \"fab801dc-51a6-4937-9a1b-67eb5db6c0a9\") " pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.332595 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-util\") pod \"6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c\" (UID: \"fab801dc-51a6-4937-9a1b-67eb5db6c0a9\") " pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.332626 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-bundle\") pod \"6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c\" (UID: \"fab801dc-51a6-4937-9a1b-67eb5db6c0a9\") " pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.333194 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-util\") pod \"6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c\" (UID: \"fab801dc-51a6-4937-9a1b-67eb5db6c0a9\") " pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.333264 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-bundle\") pod \"6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c\" (UID: \"fab801dc-51a6-4937-9a1b-67eb5db6c0a9\") " pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.371844 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtcdm\" (UniqueName: \"kubernetes.io/projected/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-kube-api-access-qtcdm\") pod \"6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c\" (UID: \"fab801dc-51a6-4937-9a1b-67eb5db6c0a9\") " pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.440001 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" Jan 27 20:20:21 crc kubenswrapper[4793]: I0127 20:20:21.848154 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c"] Jan 27 20:20:22 crc kubenswrapper[4793]: I0127 20:20:22.980675 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:20:22 crc kubenswrapper[4793]: I0127 20:20:22.980963 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:20:22 crc kubenswrapper[4793]: I0127 20:20:22.981018 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:20:22 crc kubenswrapper[4793]: I0127 20:20:22.981639 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c041cd73cb3eb270167a956b00348c4d59b0c9f650876c07addb86b8623f031a"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 20:20:22 crc kubenswrapper[4793]: I0127 20:20:22.981697 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://c041cd73cb3eb270167a956b00348c4d59b0c9f650876c07addb86b8623f031a" gracePeriod=600 Jan 27 20:20:23 crc kubenswrapper[4793]: I0127 20:20:23.000255 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" event={"ID":"fab801dc-51a6-4937-9a1b-67eb5db6c0a9","Type":"ContainerStarted","Data":"06f2abd284c09d0c3ac1009aea85f85ca45b8824639fb1c1cf1854cc94347f82"} Jan 27 20:20:23 crc kubenswrapper[4793]: I0127 20:20:23.000309 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" event={"ID":"fab801dc-51a6-4937-9a1b-67eb5db6c0a9","Type":"ContainerStarted","Data":"338167d654e9f6f47e913dc4e4ee3ff1dfd8e3c5ba4cf7e5a5a155a024f8abd8"} Jan 27 20:20:24 crc kubenswrapper[4793]: I0127 20:20:24.008093 4793 generic.go:334] "Generic (PLEG): container finished" podID="fab801dc-51a6-4937-9a1b-67eb5db6c0a9" containerID="06f2abd284c09d0c3ac1009aea85f85ca45b8824639fb1c1cf1854cc94347f82" exitCode=0 Jan 27 20:20:24 crc kubenswrapper[4793]: I0127 20:20:24.008147 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" event={"ID":"fab801dc-51a6-4937-9a1b-67eb5db6c0a9","Type":"ContainerDied","Data":"06f2abd284c09d0c3ac1009aea85f85ca45b8824639fb1c1cf1854cc94347f82"} Jan 27 20:20:24 crc kubenswrapper[4793]: I0127 20:20:24.014091 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="c041cd73cb3eb270167a956b00348c4d59b0c9f650876c07addb86b8623f031a" exitCode=0 Jan 27 20:20:24 crc kubenswrapper[4793]: I0127 20:20:24.014138 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"c041cd73cb3eb270167a956b00348c4d59b0c9f650876c07addb86b8623f031a"} Jan 27 20:20:24 crc kubenswrapper[4793]: I0127 20:20:24.014180 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"c284439d655dda73540e21780ff150cab3f56b6703697582fed54f934aaea296"} Jan 27 20:20:24 crc kubenswrapper[4793]: I0127 20:20:24.014207 4793 scope.go:117] "RemoveContainer" containerID="d44a61fca52544454e89e7b279be6491680e95bc7fe8be3fc3c65a1e94d3f817" Jan 27 20:20:25 crc kubenswrapper[4793]: I0127 20:20:25.022567 4793 generic.go:334] "Generic (PLEG): container finished" podID="fab801dc-51a6-4937-9a1b-67eb5db6c0a9" containerID="7dfdbb72ab7ed8b5d6e8850a322c7d1098e7c26532f37c078b20c926a9d0ef4d" exitCode=0 Jan 27 20:20:25 crc kubenswrapper[4793]: I0127 20:20:25.022631 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" event={"ID":"fab801dc-51a6-4937-9a1b-67eb5db6c0a9","Type":"ContainerDied","Data":"7dfdbb72ab7ed8b5d6e8850a322c7d1098e7c26532f37c078b20c926a9d0ef4d"} Jan 27 20:20:26 crc kubenswrapper[4793]: I0127 20:20:26.041986 4793 generic.go:334] "Generic (PLEG): container finished" podID="fab801dc-51a6-4937-9a1b-67eb5db6c0a9" containerID="f780c1e029ea87a53eb69b1c90e92c49f980df58f30dea70c61faa116b87f5b5" exitCode=0 Jan 27 20:20:26 crc kubenswrapper[4793]: I0127 20:20:26.042092 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" event={"ID":"fab801dc-51a6-4937-9a1b-67eb5db6c0a9","Type":"ContainerDied","Data":"f780c1e029ea87a53eb69b1c90e92c49f980df58f30dea70c61faa116b87f5b5"} Jan 
27 20:20:27 crc kubenswrapper[4793]: I0127 20:20:27.332656 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" Jan 27 20:20:27 crc kubenswrapper[4793]: I0127 20:20:27.480117 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qtcdm\" (UniqueName: \"kubernetes.io/projected/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-kube-api-access-qtcdm\") pod \"fab801dc-51a6-4937-9a1b-67eb5db6c0a9\" (UID: \"fab801dc-51a6-4937-9a1b-67eb5db6c0a9\") " Jan 27 20:20:27 crc kubenswrapper[4793]: I0127 20:20:27.480216 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-util\") pod \"fab801dc-51a6-4937-9a1b-67eb5db6c0a9\" (UID: \"fab801dc-51a6-4937-9a1b-67eb5db6c0a9\") " Jan 27 20:20:27 crc kubenswrapper[4793]: I0127 20:20:27.480297 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-bundle\") pod \"fab801dc-51a6-4937-9a1b-67eb5db6c0a9\" (UID: \"fab801dc-51a6-4937-9a1b-67eb5db6c0a9\") " Jan 27 20:20:27 crc kubenswrapper[4793]: I0127 20:20:27.481114 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-bundle" (OuterVolumeSpecName: "bundle") pod "fab801dc-51a6-4937-9a1b-67eb5db6c0a9" (UID: "fab801dc-51a6-4937-9a1b-67eb5db6c0a9"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:20:27 crc kubenswrapper[4793]: I0127 20:20:27.488037 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-kube-api-access-qtcdm" (OuterVolumeSpecName: "kube-api-access-qtcdm") pod "fab801dc-51a6-4937-9a1b-67eb5db6c0a9" (UID: "fab801dc-51a6-4937-9a1b-67eb5db6c0a9"). InnerVolumeSpecName "kube-api-access-qtcdm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:20:27 crc kubenswrapper[4793]: I0127 20:20:27.582651 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qtcdm\" (UniqueName: \"kubernetes.io/projected/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-kube-api-access-qtcdm\") on node \"crc\" DevicePath \"\"" Jan 27 20:20:27 crc kubenswrapper[4793]: I0127 20:20:27.582710 4793 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:20:27 crc kubenswrapper[4793]: I0127 20:20:27.735023 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-util" (OuterVolumeSpecName: "util") pod "fab801dc-51a6-4937-9a1b-67eb5db6c0a9" (UID: "fab801dc-51a6-4937-9a1b-67eb5db6c0a9"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:20:27 crc kubenswrapper[4793]: I0127 20:20:27.785305 4793 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fab801dc-51a6-4937-9a1b-67eb5db6c0a9-util\") on node \"crc\" DevicePath \"\"" Jan 27 20:20:28 crc kubenswrapper[4793]: I0127 20:20:28.059632 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" event={"ID":"fab801dc-51a6-4937-9a1b-67eb5db6c0a9","Type":"ContainerDied","Data":"338167d654e9f6f47e913dc4e4ee3ff1dfd8e3c5ba4cf7e5a5a155a024f8abd8"} Jan 27 20:20:28 crc kubenswrapper[4793]: I0127 20:20:28.059683 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="338167d654e9f6f47e913dc4e4ee3ff1dfd8e3c5ba4cf7e5a5a155a024f8abd8" Jan 27 20:20:28 crc kubenswrapper[4793]: I0127 20:20:28.059683 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c" Jan 27 20:20:33 crc kubenswrapper[4793]: I0127 20:20:33.853352 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-77fb6f4c55-gn2gz"] Jan 27 20:20:33 crc kubenswrapper[4793]: E0127 20:20:33.854203 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fab801dc-51a6-4937-9a1b-67eb5db6c0a9" containerName="util" Jan 27 20:20:33 crc kubenswrapper[4793]: I0127 20:20:33.854220 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="fab801dc-51a6-4937-9a1b-67eb5db6c0a9" containerName="util" Jan 27 20:20:33 crc kubenswrapper[4793]: E0127 20:20:33.854254 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fab801dc-51a6-4937-9a1b-67eb5db6c0a9" containerName="extract" Jan 27 20:20:33 crc kubenswrapper[4793]: I0127 20:20:33.854262 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="fab801dc-51a6-4937-9a1b-67eb5db6c0a9" containerName="extract" Jan 27 20:20:33 crc kubenswrapper[4793]: E0127 20:20:33.854275 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fab801dc-51a6-4937-9a1b-67eb5db6c0a9" containerName="pull" Jan 27 20:20:33 crc kubenswrapper[4793]: I0127 20:20:33.854283 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="fab801dc-51a6-4937-9a1b-67eb5db6c0a9" containerName="pull" Jan 27 20:20:33 crc kubenswrapper[4793]: I0127 20:20:33.854402 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="fab801dc-51a6-4937-9a1b-67eb5db6c0a9" containerName="extract" Jan 27 20:20:33 crc kubenswrapper[4793]: I0127 20:20:33.854861 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-77fb6f4c55-gn2gz" Jan 27 20:20:33 crc kubenswrapper[4793]: I0127 20:20:33.857285 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-whb62" Jan 27 20:20:33 crc kubenswrapper[4793]: I0127 20:20:33.866823 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zp86j\" (UniqueName: \"kubernetes.io/projected/6c36140e-9ce9-4400-9c4f-00041bb11a41-kube-api-access-zp86j\") pod \"openstack-operator-controller-init-77fb6f4c55-gn2gz\" (UID: \"6c36140e-9ce9-4400-9c4f-00041bb11a41\") " pod="openstack-operators/openstack-operator-controller-init-77fb6f4c55-gn2gz" Jan 27 20:20:33 crc kubenswrapper[4793]: I0127 20:20:33.935884 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-77fb6f4c55-gn2gz"] Jan 27 20:20:33 crc kubenswrapper[4793]: I0127 20:20:33.975089 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zp86j\" (UniqueName: \"kubernetes.io/projected/6c36140e-9ce9-4400-9c4f-00041bb11a41-kube-api-access-zp86j\") pod \"openstack-operator-controller-init-77fb6f4c55-gn2gz\" (UID: \"6c36140e-9ce9-4400-9c4f-00041bb11a41\") " pod="openstack-operators/openstack-operator-controller-init-77fb6f4c55-gn2gz" Jan 27 20:20:34 crc kubenswrapper[4793]: I0127 20:20:34.007621 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zp86j\" (UniqueName: \"kubernetes.io/projected/6c36140e-9ce9-4400-9c4f-00041bb11a41-kube-api-access-zp86j\") pod \"openstack-operator-controller-init-77fb6f4c55-gn2gz\" (UID: \"6c36140e-9ce9-4400-9c4f-00041bb11a41\") " pod="openstack-operators/openstack-operator-controller-init-77fb6f4c55-gn2gz" Jan 27 20:20:34 crc kubenswrapper[4793]: I0127 20:20:34.174747 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-77fb6f4c55-gn2gz" Jan 27 20:20:34 crc kubenswrapper[4793]: I0127 20:20:34.429382 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-77fb6f4c55-gn2gz"] Jan 27 20:20:35 crc kubenswrapper[4793]: I0127 20:20:35.129463 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-77fb6f4c55-gn2gz" event={"ID":"6c36140e-9ce9-4400-9c4f-00041bb11a41","Type":"ContainerStarted","Data":"6146cede96b66dd517da59ba781a9aee2ac4bc1167ea8279bb6cb7d39cfe6b0a"} Jan 27 20:20:42 crc kubenswrapper[4793]: I0127 20:20:42.187084 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-77fb6f4c55-gn2gz" event={"ID":"6c36140e-9ce9-4400-9c4f-00041bb11a41","Type":"ContainerStarted","Data":"e3e02dd8dfba70b59513d9943c4f25a85df3e9820f952763f202b9317f0cf892"} Jan 27 20:20:42 crc kubenswrapper[4793]: I0127 20:20:42.188665 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-77fb6f4c55-gn2gz" Jan 27 20:20:42 crc kubenswrapper[4793]: I0127 20:20:42.218455 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-77fb6f4c55-gn2gz" podStartSLOduration=2.37128321 podStartE2EDuration="9.218431883s" podCreationTimestamp="2026-01-27 20:20:33 +0000 UTC" firstStartedPulling="2026-01-27 20:20:34.453428384 +0000 UTC m=+1059.843681540" lastFinishedPulling="2026-01-27 20:20:41.300577057 +0000 UTC m=+1066.690830213" observedRunningTime="2026-01-27 20:20:42.214529027 +0000 UTC m=+1067.604782183" watchObservedRunningTime="2026-01-27 20:20:42.218431883 +0000 UTC m=+1067.608685059" Jan 27 20:20:54 crc kubenswrapper[4793]: I0127 20:20:54.179091 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-77fb6f4c55-gn2gz" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.278643 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qt685"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.280045 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qt685" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.286370 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7f86f8796f-jn9j9"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.287390 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-jn9j9" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.288923 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-mbdhm" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.289265 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-bfh7q" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.289537 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-pddnh"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.290500 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-pddnh" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.297750 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-qtj2w" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.299263 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7f86f8796f-jn9j9"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.304702 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-pddnh"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.315590 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qt685"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.319311 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-crv46"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.320099 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-crv46" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.369726 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-s5t7v" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.388675 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-k9gch"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.390270 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-k9gch" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.397651 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-crv46"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.399584 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-k6b64" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.407109 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-k9gch"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.419042 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g4rnc"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.420086 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g4rnc" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.423411 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-xtlqm" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.427626 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g4rnc"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.461223 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-694cf4f878-752f2"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.465764 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.468175 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-6smst" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.468424 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.477765 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9tml\" (UniqueName: \"kubernetes.io/projected/742adff5-75e8-4941-9815-03bc77850cfa-kube-api-access-j9tml\") pod \"infra-operator-controller-manager-694cf4f878-752f2\" (UID: \"742adff5-75e8-4941-9815-03bc77850cfa\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.477963 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9wcn\" (UniqueName: \"kubernetes.io/projected/4105e86a-92bd-45a9-869f-310d634a514c-kube-api-access-n9wcn\") pod \"designate-operator-controller-manager-b45d7bf98-pddnh\" (UID: \"4105e86a-92bd-45a9-869f-310d634a514c\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-pddnh" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.477981 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert\") pod \"infra-operator-controller-manager-694cf4f878-752f2\" (UID: \"742adff5-75e8-4941-9815-03bc77850cfa\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.478032 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kk7kl\" (UniqueName: \"kubernetes.io/projected/01780455-12a9-41cc-80bb-71d643522796-kube-api-access-kk7kl\") pod \"cinder-operator-controller-manager-7478f7dbf9-qt685\" (UID: \"01780455-12a9-41cc-80bb-71d643522796\") " pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qt685" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.478051 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p66rq\" (UniqueName: \"kubernetes.io/projected/44104a6a-d41a-4b4c-b119-1886c9b48a8b-kube-api-access-p66rq\") pod \"glance-operator-controller-manager-78fdd796fd-crv46\" (UID: \"44104a6a-d41a-4b4c-b119-1886c9b48a8b\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-crv46" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.478082 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xtbq\" (UniqueName: \"kubernetes.io/projected/42bc0dc3-e9b2-4edc-865a-4d301956ec59-kube-api-access-5xtbq\") pod \"barbican-operator-controller-manager-7f86f8796f-jn9j9\" (UID: \"42bc0dc3-e9b2-4edc-865a-4d301956ec59\") " pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-jn9j9" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.481882 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-694cf4f878-752f2"] Jan 27 20:21:15 crc 
kubenswrapper[4793]: I0127 20:21:15.497319 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-598f7747c9-h2w8c"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.498326 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-h2w8c" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.509316 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-zkqnc" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.516625 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-795xn"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.517739 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-795xn" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.522350 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-rnmg6" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.549615 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-598f7747c9-h2w8c"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.560469 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-m5bwb"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.561419 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-m5bwb" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.563815 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-rqgpj" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.587110 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-824xz"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.588072 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-824xz" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.589248 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kk7kl\" (UniqueName: \"kubernetes.io/projected/01780455-12a9-41cc-80bb-71d643522796-kube-api-access-kk7kl\") pod \"cinder-operator-controller-manager-7478f7dbf9-qt685\" (UID: \"01780455-12a9-41cc-80bb-71d643522796\") " pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qt685" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.589281 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p66rq\" (UniqueName: \"kubernetes.io/projected/44104a6a-d41a-4b4c-b119-1886c9b48a8b-kube-api-access-p66rq\") pod \"glance-operator-controller-manager-78fdd796fd-crv46\" (UID: \"44104a6a-d41a-4b4c-b119-1886c9b48a8b\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-crv46" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.589322 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xtbq\" (UniqueName: \"kubernetes.io/projected/42bc0dc3-e9b2-4edc-865a-4d301956ec59-kube-api-access-5xtbq\") pod \"barbican-operator-controller-manager-7f86f8796f-jn9j9\" (UID: \"42bc0dc3-e9b2-4edc-865a-4d301956ec59\") " pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-jn9j9" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.589383 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxkmj\" (UniqueName: \"kubernetes.io/projected/4bd1bccc-c605-43f0-a9ed-1f82139a6f16-kube-api-access-cxkmj\") pod \"horizon-operator-controller-manager-77d5c5b54f-g4rnc\" (UID: \"4bd1bccc-c605-43f0-a9ed-1f82139a6f16\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g4rnc" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.589409 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9tml\" (UniqueName: \"kubernetes.io/projected/742adff5-75e8-4941-9815-03bc77850cfa-kube-api-access-j9tml\") pod \"infra-operator-controller-manager-694cf4f878-752f2\" (UID: \"742adff5-75e8-4941-9815-03bc77850cfa\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.589436 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9wcn\" (UniqueName: \"kubernetes.io/projected/4105e86a-92bd-45a9-869f-310d634a514c-kube-api-access-n9wcn\") pod \"designate-operator-controller-manager-b45d7bf98-pddnh\" (UID: \"4105e86a-92bd-45a9-869f-310d634a514c\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-pddnh" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.589470 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert\") pod \"infra-operator-controller-manager-694cf4f878-752f2\" (UID: \"742adff5-75e8-4941-9815-03bc77850cfa\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.589518 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvnpv\" (UniqueName: 
\"kubernetes.io/projected/cb13d95b-3b92-49bc-9bcd-af7d24b09869-kube-api-access-dvnpv\") pod \"heat-operator-controller-manager-594c8c9d5d-k9gch\" (UID: \"cb13d95b-3b92-49bc-9bcd-af7d24b09869\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-k9gch" Jan 27 20:21:15 crc kubenswrapper[4793]: E0127 20:21:15.590734 4793 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 27 20:21:15 crc kubenswrapper[4793]: E0127 20:21:15.590783 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert podName:742adff5-75e8-4941-9815-03bc77850cfa nodeName:}" failed. No retries permitted until 2026-01-27 20:21:16.090764367 +0000 UTC m=+1101.481017523 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert") pod "infra-operator-controller-manager-694cf4f878-752f2" (UID: "742adff5-75e8-4941-9815-03bc77850cfa") : secret "infra-operator-webhook-server-cert" not found Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.596964 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-5mrt5" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.597601 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-795xn"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.610179 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-m5bwb"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.615104 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.616195 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.626313 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kk7kl\" (UniqueName: \"kubernetes.io/projected/01780455-12a9-41cc-80bb-71d643522796-kube-api-access-kk7kl\") pod \"cinder-operator-controller-manager-7478f7dbf9-qt685\" (UID: \"01780455-12a9-41cc-80bb-71d643522796\") " pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qt685" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.632843 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-w6q2w" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.633858 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9tml\" (UniqueName: \"kubernetes.io/projected/742adff5-75e8-4941-9815-03bc77850cfa-kube-api-access-j9tml\") pod \"infra-operator-controller-manager-694cf4f878-752f2\" (UID: \"742adff5-75e8-4941-9815-03bc77850cfa\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.639283 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9wcn\" (UniqueName: \"kubernetes.io/projected/4105e86a-92bd-45a9-869f-310d634a514c-kube-api-access-n9wcn\") pod \"designate-operator-controller-manager-b45d7bf98-pddnh\" (UID: \"4105e86a-92bd-45a9-869f-310d634a514c\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-pddnh" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.653871 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qt685" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.669662 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xtbq\" (UniqueName: \"kubernetes.io/projected/42bc0dc3-e9b2-4edc-865a-4d301956ec59-kube-api-access-5xtbq\") pod \"barbican-operator-controller-manager-7f86f8796f-jn9j9\" (UID: \"42bc0dc3-e9b2-4edc-865a-4d301956ec59\") " pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-jn9j9" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.669984 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p66rq\" (UniqueName: \"kubernetes.io/projected/44104a6a-d41a-4b4c-b119-1886c9b48a8b-kube-api-access-p66rq\") pod \"glance-operator-controller-manager-78fdd796fd-crv46\" (UID: \"44104a6a-d41a-4b4c-b119-1886c9b48a8b\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-crv46" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.680857 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.691983 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnqb4\" (UniqueName: \"kubernetes.io/projected/ddb0bf87-c13a-48d3-9fa7-95c891f6c057-kube-api-access-hnqb4\") pod \"manila-operator-controller-manager-78c6999f6f-m5bwb\" (UID: \"ddb0bf87-c13a-48d3-9fa7-95c891f6c057\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-m5bwb" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.692056 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvnpv\" (UniqueName: \"kubernetes.io/projected/cb13d95b-3b92-49bc-9bcd-af7d24b09869-kube-api-access-dvnpv\") pod \"heat-operator-controller-manager-594c8c9d5d-k9gch\" (UID: \"cb13d95b-3b92-49bc-9bcd-af7d24b09869\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-k9gch" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.692113 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nn4jf\" (UniqueName: \"kubernetes.io/projected/a227b0fb-079f-4d81-9f3e-380202824892-kube-api-access-nn4jf\") pod \"neutron-operator-controller-manager-78d58447c5-mfx2z\" (UID: \"a227b0fb-079f-4d81-9f3e-380202824892\") " pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.692147 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ql9kv\" (UniqueName: \"kubernetes.io/projected/744f20ad-f891-4948-8a8e-e0757333b75b-kube-api-access-ql9kv\") pod \"keystone-operator-controller-manager-b8b6d4659-795xn\" (UID: \"744f20ad-f891-4948-8a8e-e0757333b75b\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-795xn" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.692218 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rhbm\" (UniqueName: \"kubernetes.io/projected/a78a18e3-25af-470d-8d1a-3bbc5b936703-kube-api-access-7rhbm\") pod \"mariadb-operator-controller-manager-6b9fb5fdcb-824xz\" (UID: \"a78a18e3-25af-470d-8d1a-3bbc5b936703\") " 
pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-824xz" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.692267 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxkmj\" (UniqueName: \"kubernetes.io/projected/4bd1bccc-c605-43f0-a9ed-1f82139a6f16-kube-api-access-cxkmj\") pod \"horizon-operator-controller-manager-77d5c5b54f-g4rnc\" (UID: \"4bd1bccc-c605-43f0-a9ed-1f82139a6f16\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g4rnc" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.692314 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzjbm\" (UniqueName: \"kubernetes.io/projected/81ead9a2-beaa-4651-a9ee-aaeda0acd7c3-kube-api-access-mzjbm\") pod \"ironic-operator-controller-manager-598f7747c9-h2w8c\" (UID: \"81ead9a2-beaa-4651-a9ee-aaeda0acd7c3\") " pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-h2w8c" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.692510 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-7bdb645866-clx4z"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.693434 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-clx4z" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.697000 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-59bq2" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.699056 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-jn9j9" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.714416 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-pddnh" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.731417 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-7bdb645866-clx4z"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.736272 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-crv46" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.761082 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-824xz"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.770317 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxkmj\" (UniqueName: \"kubernetes.io/projected/4bd1bccc-c605-43f0-a9ed-1f82139a6f16-kube-api-access-cxkmj\") pod \"horizon-operator-controller-manager-77d5c5b54f-g4rnc\" (UID: \"4bd1bccc-c605-43f0-a9ed-1f82139a6f16\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g4rnc" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.770393 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvnpv\" (UniqueName: \"kubernetes.io/projected/cb13d95b-3b92-49bc-9bcd-af7d24b09869-kube-api-access-dvnpv\") pod \"heat-operator-controller-manager-594c8c9d5d-k9gch\" (UID: \"cb13d95b-3b92-49bc-9bcd-af7d24b09869\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-k9gch" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.780147 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5f4cd88d46-t5lwz"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.781047 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-t5lwz" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.784703 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-4qx4p" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.790866 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.791656 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.792127 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g4rnc" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.793386 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzjbm\" (UniqueName: \"kubernetes.io/projected/81ead9a2-beaa-4651-a9ee-aaeda0acd7c3-kube-api-access-mzjbm\") pod \"ironic-operator-controller-manager-598f7747c9-h2w8c\" (UID: \"81ead9a2-beaa-4651-a9ee-aaeda0acd7c3\") " pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-h2w8c" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.793426 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnqb4\" (UniqueName: \"kubernetes.io/projected/ddb0bf87-c13a-48d3-9fa7-95c891f6c057-kube-api-access-hnqb4\") pod \"manila-operator-controller-manager-78c6999f6f-m5bwb\" (UID: \"ddb0bf87-c13a-48d3-9fa7-95c891f6c057\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-m5bwb" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.794108 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nn4jf\" (UniqueName: \"kubernetes.io/projected/a227b0fb-079f-4d81-9f3e-380202824892-kube-api-access-nn4jf\") pod \"neutron-operator-controller-manager-78d58447c5-mfx2z\" (UID: \"a227b0fb-079f-4d81-9f3e-380202824892\") " pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.794133 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ql9kv\" (UniqueName: \"kubernetes.io/projected/744f20ad-f891-4948-8a8e-e0757333b75b-kube-api-access-ql9kv\") pod \"keystone-operator-controller-manager-b8b6d4659-795xn\" (UID: \"744f20ad-f891-4948-8a8e-e0757333b75b\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-795xn" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.794618 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rhbm\" (UniqueName: \"kubernetes.io/projected/a78a18e3-25af-470d-8d1a-3bbc5b936703-kube-api-access-7rhbm\") pod \"mariadb-operator-controller-manager-6b9fb5fdcb-824xz\" (UID: \"a78a18e3-25af-470d-8d1a-3bbc5b936703\") " pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-824xz" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.796774 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-89ftx" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.797082 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.821922 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzjbm\" (UniqueName: \"kubernetes.io/projected/81ead9a2-beaa-4651-a9ee-aaeda0acd7c3-kube-api-access-mzjbm\") pod \"ironic-operator-controller-manager-598f7747c9-h2w8c\" (UID: \"81ead9a2-beaa-4651-a9ee-aaeda0acd7c3\") " pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-h2w8c" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.823196 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ql9kv\" (UniqueName: \"kubernetes.io/projected/744f20ad-f891-4948-8a8e-e0757333b75b-kube-api-access-ql9kv\") pod 
\"keystone-operator-controller-manager-b8b6d4659-795xn\" (UID: \"744f20ad-f891-4948-8a8e-e0757333b75b\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-795xn" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.828848 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnqb4\" (UniqueName: \"kubernetes.io/projected/ddb0bf87-c13a-48d3-9fa7-95c891f6c057-kube-api-access-hnqb4\") pod \"manila-operator-controller-manager-78c6999f6f-m5bwb\" (UID: \"ddb0bf87-c13a-48d3-9fa7-95c891f6c057\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-m5bwb" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.837580 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nn4jf\" (UniqueName: \"kubernetes.io/projected/a227b0fb-079f-4d81-9f3e-380202824892-kube-api-access-nn4jf\") pod \"neutron-operator-controller-manager-78d58447c5-mfx2z\" (UID: \"a227b0fb-079f-4d81-9f3e-380202824892\") " pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.838032 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5f4cd88d46-t5lwz"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.838391 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rhbm\" (UniqueName: \"kubernetes.io/projected/a78a18e3-25af-470d-8d1a-3bbc5b936703-kube-api-access-7rhbm\") pod \"mariadb-operator-controller-manager-6b9fb5fdcb-824xz\" (UID: \"a78a18e3-25af-470d-8d1a-3bbc5b936703\") " pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-824xz" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.846750 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-h2w8c" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.865213 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-795xn" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.903954 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85428jbw\" (UID: \"85aa101f-3371-46e6-84e5-83005bdb7799\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.904049 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsbmj\" (UniqueName: \"kubernetes.io/projected/ff2db85c-6bbf-45c8-b27b-93b2cff54130-kube-api-access-tsbmj\") pod \"octavia-operator-controller-manager-5f4cd88d46-t5lwz\" (UID: \"ff2db85c-6bbf-45c8-b27b-93b2cff54130\") " pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-t5lwz" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.904145 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wlbc\" (UniqueName: \"kubernetes.io/projected/3d4f2efb-7e15-4d10-ad22-a22b7ef0eb67-kube-api-access-5wlbc\") pod \"nova-operator-controller-manager-7bdb645866-clx4z\" (UID: \"3d4f2efb-7e15-4d10-ad22-a22b7ef0eb67\") " pod="openstack-operators/nova-operator-controller-manager-7bdb645866-clx4z" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.904190 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npg5d\" (UniqueName: \"kubernetes.io/projected/85aa101f-3371-46e6-84e5-83005bdb7799-kube-api-access-npg5d\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85428jbw\" (UID: \"85aa101f-3371-46e6-84e5-83005bdb7799\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.913857 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-m5bwb" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.939422 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6f75f45d54-n85zq"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.942627 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-n85zq" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.951158 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-pn95x" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.972483 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-79d5ccc684-wqcbj"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.976238 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-wqcbj" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.978865 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw"] Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.979283 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-hjklt" Jan 27 20:21:15 crc kubenswrapper[4793]: I0127 20:21:15.984425 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6f75f45d54-n85zq"] Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.006721 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85428jbw\" (UID: \"85aa101f-3371-46e6-84e5-83005bdb7799\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.006800 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsbmj\" (UniqueName: \"kubernetes.io/projected/ff2db85c-6bbf-45c8-b27b-93b2cff54130-kube-api-access-tsbmj\") pod \"octavia-operator-controller-manager-5f4cd88d46-t5lwz\" (UID: \"ff2db85c-6bbf-45c8-b27b-93b2cff54130\") " pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-t5lwz" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.006990 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wlbc\" (UniqueName: \"kubernetes.io/projected/3d4f2efb-7e15-4d10-ad22-a22b7ef0eb67-kube-api-access-5wlbc\") pod \"nova-operator-controller-manager-7bdb645866-clx4z\" (UID: \"3d4f2efb-7e15-4d10-ad22-a22b7ef0eb67\") " pod="openstack-operators/nova-operator-controller-manager-7bdb645866-clx4z" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.007029 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npg5d\" (UniqueName: \"kubernetes.io/projected/85aa101f-3371-46e6-84e5-83005bdb7799-kube-api-access-npg5d\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85428jbw\" (UID: \"85aa101f-3371-46e6-84e5-83005bdb7799\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.007804 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv"] Jan 27 20:21:16 crc kubenswrapper[4793]: E0127 20:21:16.009715 4793 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 20:21:16 crc kubenswrapper[4793]: E0127 20:21:16.009778 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert podName:85aa101f-3371-46e6-84e5-83005bdb7799 nodeName:}" failed. No retries permitted until 2026-01-27 20:21:16.50975777 +0000 UTC m=+1101.900011006 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" (UID: "85aa101f-3371-46e6-84e5-83005bdb7799") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.010722 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.039038 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-824xz" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.040444 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-xxhb8" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.048218 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-q2ft5"] Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.049760 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-q2ft5" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.050162 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-79d5ccc684-wqcbj"] Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.052017 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-hzt9c" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.054736 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npg5d\" (UniqueName: \"kubernetes.io/projected/85aa101f-3371-46e6-84e5-83005bdb7799-kube-api-access-npg5d\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85428jbw\" (UID: \"85aa101f-3371-46e6-84e5-83005bdb7799\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.056824 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-q2ft5"] Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.065071 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsbmj\" (UniqueName: \"kubernetes.io/projected/ff2db85c-6bbf-45c8-b27b-93b2cff54130-kube-api-access-tsbmj\") pod \"octavia-operator-controller-manager-5f4cd88d46-t5lwz\" (UID: \"ff2db85c-6bbf-45c8-b27b-93b2cff54130\") " pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-t5lwz" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.065171 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv"] Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.065463 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-k9gch" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.068110 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wlbc\" (UniqueName: \"kubernetes.io/projected/3d4f2efb-7e15-4d10-ad22-a22b7ef0eb67-kube-api-access-5wlbc\") pod \"nova-operator-controller-manager-7bdb645866-clx4z\" (UID: \"3d4f2efb-7e15-4d10-ad22-a22b7ef0eb67\") " pod="openstack-operators/nova-operator-controller-manager-7bdb645866-clx4z" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.072603 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-gh2bm"] Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.073780 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-gh2bm" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.080776 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-gh2bm"] Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.093812 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5895dd5db-pkckf"] Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.095177 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5895dd5db-pkckf" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.100213 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5895dd5db-pkckf"] Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.108592 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jj4xp\" (UniqueName: \"kubernetes.io/projected/1da2285e-00b1-4b86-993c-47be2c441dc0-kube-api-access-jj4xp\") pod \"ovn-operator-controller-manager-6f75f45d54-n85zq\" (UID: \"1da2285e-00b1-4b86-993c-47be2c441dc0\") " pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-n85zq" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.108640 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nvcq\" (UniqueName: \"kubernetes.io/projected/71401df0-2118-4561-9d06-e311458f5357-kube-api-access-5nvcq\") pod \"swift-operator-controller-manager-547cbdb99f-tqmbv\" (UID: \"71401df0-2118-4561-9d06-e311458f5357\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.108861 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert\") pod \"infra-operator-controller-manager-694cf4f878-752f2\" (UID: \"742adff5-75e8-4941-9815-03bc77850cfa\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" Jan 27 20:21:16 crc kubenswrapper[4793]: E0127 20:21:16.109009 4793 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 27 20:21:16 crc kubenswrapper[4793]: E0127 20:21:16.109073 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert 
podName:742adff5-75e8-4941-9815-03bc77850cfa nodeName:}" failed. No retries permitted until 2026-01-27 20:21:17.109053157 +0000 UTC m=+1102.499306313 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert") pod "infra-operator-controller-manager-694cf4f878-752f2" (UID: "742adff5-75e8-4941-9815-03bc77850cfa") : secret "infra-operator-webhook-server-cert" not found Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.109514 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mzjf\" (UniqueName: \"kubernetes.io/projected/d0f1b177-1cdf-45ce-a4f3-5cb9a4a3610d-kube-api-access-5mzjf\") pod \"placement-operator-controller-manager-79d5ccc684-wqcbj\" (UID: \"d0f1b177-1cdf-45ce-a4f3-5cb9a4a3610d\") " pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-wqcbj" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.110135 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.133179 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-clx4z" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.134272 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-5mnd8" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.134313 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-mzfsx" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.144972 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-t5lwz" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.204654 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh"] Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.206461 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.217334 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mzjf\" (UniqueName: \"kubernetes.io/projected/d0f1b177-1cdf-45ce-a4f3-5cb9a4a3610d-kube-api-access-5mzjf\") pod \"placement-operator-controller-manager-79d5ccc684-wqcbj\" (UID: \"d0f1b177-1cdf-45ce-a4f3-5cb9a4a3610d\") " pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-wqcbj" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.217426 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnnf8\" (UniqueName: \"kubernetes.io/projected/32f012e5-4f42-4aaf-bc4a-25ad68296efc-kube-api-access-vnnf8\") pod \"watcher-operator-controller-manager-5895dd5db-pkckf\" (UID: \"32f012e5-4f42-4aaf-bc4a-25ad68296efc\") " pod="openstack-operators/watcher-operator-controller-manager-5895dd5db-pkckf" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.217475 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qx4hm\" (UniqueName: \"kubernetes.io/projected/f6cc0f9d-084e-46a3-a2b9-691474c16005-kube-api-access-qx4hm\") pod \"telemetry-operator-controller-manager-85cd9769bb-q2ft5\" (UID: \"f6cc0f9d-084e-46a3-a2b9-691474c16005\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-q2ft5" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.217572 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jj4xp\" (UniqueName: \"kubernetes.io/projected/1da2285e-00b1-4b86-993c-47be2c441dc0-kube-api-access-jj4xp\") pod \"ovn-operator-controller-manager-6f75f45d54-n85zq\" (UID: \"1da2285e-00b1-4b86-993c-47be2c441dc0\") " pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-n85zq" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.217608 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnlw5\" (UniqueName: \"kubernetes.io/projected/40d57f5e-8c71-4278-b169-c1c439c8fe4a-kube-api-access-fnlw5\") pod \"test-operator-controller-manager-69797bbcbd-gh2bm\" (UID: \"40d57f5e-8c71-4278-b169-c1c439c8fe4a\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-gh2bm" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.217642 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nvcq\" (UniqueName: \"kubernetes.io/projected/71401df0-2118-4561-9d06-e311458f5357-kube-api-access-5nvcq\") pod \"swift-operator-controller-manager-547cbdb99f-tqmbv\" (UID: \"71401df0-2118-4561-9d06-e311458f5357\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.603536 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnlw5\" (UniqueName: \"kubernetes.io/projected/40d57f5e-8c71-4278-b169-c1c439c8fe4a-kube-api-access-fnlw5\") pod \"test-operator-controller-manager-69797bbcbd-gh2bm\" (UID: \"40d57f5e-8c71-4278-b169-c1c439c8fe4a\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-gh2bm" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.603718 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" 
(UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.603898 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pt66\" (UniqueName: \"kubernetes.io/projected/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-kube-api-access-2pt66\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.604018 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnnf8\" (UniqueName: \"kubernetes.io/projected/32f012e5-4f42-4aaf-bc4a-25ad68296efc-kube-api-access-vnnf8\") pod \"watcher-operator-controller-manager-5895dd5db-pkckf\" (UID: \"32f012e5-4f42-4aaf-bc4a-25ad68296efc\") " pod="openstack-operators/watcher-operator-controller-manager-5895dd5db-pkckf" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.604145 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.604169 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qx4hm\" (UniqueName: \"kubernetes.io/projected/f6cc0f9d-084e-46a3-a2b9-691474c16005-kube-api-access-qx4hm\") pod \"telemetry-operator-controller-manager-85cd9769bb-q2ft5\" (UID: \"f6cc0f9d-084e-46a3-a2b9-691474c16005\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-q2ft5" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.604295 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85428jbw\" (UID: \"85aa101f-3371-46e6-84e5-83005bdb7799\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.618082 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh"] Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.618331 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-86hfp" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.618604 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.618738 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 27 20:21:16 crc kubenswrapper[4793]: E0127 20:21:16.625229 4793 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret 
"openstack-baremetal-operator-webhook-server-cert" not found Jan 27 20:21:16 crc kubenswrapper[4793]: E0127 20:21:16.625312 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert podName:85aa101f-3371-46e6-84e5-83005bdb7799 nodeName:}" failed. No retries permitted until 2026-01-27 20:21:17.625291467 +0000 UTC m=+1103.015544663 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" (UID: "85aa101f-3371-46e6-84e5-83005bdb7799") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.653498 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mzjf\" (UniqueName: \"kubernetes.io/projected/d0f1b177-1cdf-45ce-a4f3-5cb9a4a3610d-kube-api-access-5mzjf\") pod \"placement-operator-controller-manager-79d5ccc684-wqcbj\" (UID: \"d0f1b177-1cdf-45ce-a4f3-5cb9a4a3610d\") " pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-wqcbj" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.688467 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qx4hm\" (UniqueName: \"kubernetes.io/projected/f6cc0f9d-084e-46a3-a2b9-691474c16005-kube-api-access-qx4hm\") pod \"telemetry-operator-controller-manager-85cd9769bb-q2ft5\" (UID: \"f6cc0f9d-084e-46a3-a2b9-691474c16005\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-q2ft5" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.700972 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnnf8\" (UniqueName: \"kubernetes.io/projected/32f012e5-4f42-4aaf-bc4a-25ad68296efc-kube-api-access-vnnf8\") pod \"watcher-operator-controller-manager-5895dd5db-pkckf\" (UID: \"32f012e5-4f42-4aaf-bc4a-25ad68296efc\") " pod="openstack-operators/watcher-operator-controller-manager-5895dd5db-pkckf" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.702053 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jj4xp\" (UniqueName: \"kubernetes.io/projected/1da2285e-00b1-4b86-993c-47be2c441dc0-kube-api-access-jj4xp\") pod \"ovn-operator-controller-manager-6f75f45d54-n85zq\" (UID: \"1da2285e-00b1-4b86-993c-47be2c441dc0\") " pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-n85zq" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.711362 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.711470 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.711499 4793 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-2pt66\" (UniqueName: \"kubernetes.io/projected/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-kube-api-access-2pt66\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:16 crc kubenswrapper[4793]: E0127 20:21:16.711890 4793 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 27 20:21:16 crc kubenswrapper[4793]: E0127 20:21:16.711929 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs podName:68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b nodeName:}" failed. No retries permitted until 2026-01-27 20:21:17.211914202 +0000 UTC m=+1102.602167358 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs") pod "openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" (UID: "68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b") : secret "webhook-server-cert" not found Jan 27 20:21:16 crc kubenswrapper[4793]: E0127 20:21:16.711966 4793 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 27 20:21:16 crc kubenswrapper[4793]: E0127 20:21:16.711988 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs podName:68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b nodeName:}" failed. No retries permitted until 2026-01-27 20:21:17.211981633 +0000 UTC m=+1102.602234789 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs") pod "openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" (UID: "68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b") : secret "metrics-server-cert" not found Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.715106 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nvcq\" (UniqueName: \"kubernetes.io/projected/71401df0-2118-4561-9d06-e311458f5357-kube-api-access-5nvcq\") pod \"swift-operator-controller-manager-547cbdb99f-tqmbv\" (UID: \"71401df0-2118-4561-9d06-e311458f5357\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.752182 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnlw5\" (UniqueName: \"kubernetes.io/projected/40d57f5e-8c71-4278-b169-c1c439c8fe4a-kube-api-access-fnlw5\") pod \"test-operator-controller-manager-69797bbcbd-gh2bm\" (UID: \"40d57f5e-8c71-4278-b169-c1c439c8fe4a\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-gh2bm" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.810777 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-q2ft5" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.811994 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.830922 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-gh2bm" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.848399 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5895dd5db-pkckf" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.930714 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-wqcbj" Jan 27 20:21:16 crc kubenswrapper[4793]: I0127 20:21:16.993028 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-n85zq" Jan 27 20:21:17 crc kubenswrapper[4793]: I0127 20:21:17.317594 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:17 crc kubenswrapper[4793]: I0127 20:21:17.317716 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:17 crc kubenswrapper[4793]: I0127 20:21:17.317800 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert\") pod \"infra-operator-controller-manager-694cf4f878-752f2\" (UID: \"742adff5-75e8-4941-9815-03bc77850cfa\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" Jan 27 20:21:17 crc kubenswrapper[4793]: E0127 20:21:17.347174 4793 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 27 20:21:17 crc kubenswrapper[4793]: E0127 20:21:17.347245 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs podName:68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b nodeName:}" failed. No retries permitted until 2026-01-27 20:21:18.347222055 +0000 UTC m=+1103.737475211 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs") pod "openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" (UID: "68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b") : secret "webhook-server-cert" not found Jan 27 20:21:17 crc kubenswrapper[4793]: E0127 20:21:17.347318 4793 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 27 20:21:17 crc kubenswrapper[4793]: E0127 20:21:17.347346 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs podName:68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b nodeName:}" failed. No retries permitted until 2026-01-27 20:21:18.347337498 +0000 UTC m=+1103.737590654 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs") pod "openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" (UID: "68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b") : secret "metrics-server-cert" not found Jan 27 20:21:17 crc kubenswrapper[4793]: E0127 20:21:17.350527 4793 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 27 20:21:17 crc kubenswrapper[4793]: E0127 20:21:17.350743 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert podName:742adff5-75e8-4941-9815-03bc77850cfa nodeName:}" failed. No retries permitted until 2026-01-27 20:21:19.350728172 +0000 UTC m=+1104.740981328 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert") pod "infra-operator-controller-manager-694cf4f878-752f2" (UID: "742adff5-75e8-4941-9815-03bc77850cfa") : secret "infra-operator-webhook-server-cert" not found Jan 27 20:21:17 crc kubenswrapper[4793]: I0127 20:21:17.405600 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pt66\" (UniqueName: \"kubernetes.io/projected/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-kube-api-access-2pt66\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:17 crc kubenswrapper[4793]: I0127 20:21:17.763105 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85428jbw\" (UID: \"85aa101f-3371-46e6-84e5-83005bdb7799\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" Jan 27 20:21:17 crc kubenswrapper[4793]: E0127 20:21:17.763388 4793 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 20:21:17 crc kubenswrapper[4793]: E0127 20:21:17.763447 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert podName:85aa101f-3371-46e6-84e5-83005bdb7799 nodeName:}" failed. No retries permitted until 2026-01-27 20:21:19.763428661 +0000 UTC m=+1105.153681817 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" (UID: "85aa101f-3371-46e6-84e5-83005bdb7799") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 20:21:18 crc kubenswrapper[4793]: I0127 20:21:18.385249 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:18 crc kubenswrapper[4793]: I0127 20:21:18.385567 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:18 crc kubenswrapper[4793]: E0127 20:21:18.385746 4793 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 27 20:21:18 crc kubenswrapper[4793]: E0127 20:21:18.385902 4793 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 27 20:21:18 crc kubenswrapper[4793]: E0127 20:21:18.385804 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs podName:68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b nodeName:}" failed. No retries permitted until 2026-01-27 20:21:20.385789016 +0000 UTC m=+1105.776042172 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs") pod "openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" (UID: "68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b") : secret "webhook-server-cert" not found Jan 27 20:21:18 crc kubenswrapper[4793]: E0127 20:21:18.386025 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs podName:68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b nodeName:}" failed. No retries permitted until 2026-01-27 20:21:20.386013441 +0000 UTC m=+1105.776266597 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs") pod "openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" (UID: "68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b") : secret "metrics-server-cert" not found Jan 27 20:21:18 crc kubenswrapper[4793]: I0127 20:21:18.522666 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qt685"] Jan 27 20:21:18 crc kubenswrapper[4793]: I0127 20:21:18.565455 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-khvmz"] Jan 27 20:21:18 crc kubenswrapper[4793]: I0127 20:21:18.573201 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-khvmz" Jan 27 20:21:18 crc kubenswrapper[4793]: I0127 20:21:18.575637 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7f86f8796f-jn9j9"] Jan 27 20:21:18 crc kubenswrapper[4793]: I0127 20:21:18.577239 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-9gwds" Jan 27 20:21:18 crc kubenswrapper[4793]: I0127 20:21:18.604186 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-khvmz"] Jan 27 20:21:18 crc kubenswrapper[4793]: W0127 20:21:18.669732 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod01780455_12a9_41cc_80bb_71d643522796.slice/crio-d479f532b7298ee8f23feeb3584075b2c2dfe75cd11a8aed6b087708ccf65c94 WatchSource:0}: Error finding container d479f532b7298ee8f23feeb3584075b2c2dfe75cd11a8aed6b087708ccf65c94: Status 404 returned error can't find the container with id d479f532b7298ee8f23feeb3584075b2c2dfe75cd11a8aed6b087708ccf65c94 Jan 27 20:21:18 crc kubenswrapper[4793]: I0127 20:21:18.682523 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 20:21:18 crc kubenswrapper[4793]: I0127 20:21:18.691664 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hz7dz\" (UniqueName: \"kubernetes.io/projected/b0693604-a724-43ba-92b9-d4a52ca4cf85-kube-api-access-hz7dz\") pod \"rabbitmq-cluster-operator-manager-668c99d594-khvmz\" (UID: \"b0693604-a724-43ba-92b9-d4a52ca4cf85\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-khvmz" Jan 27 20:21:18 crc kubenswrapper[4793]: I0127 20:21:18.818695 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hz7dz\" (UniqueName: \"kubernetes.io/projected/b0693604-a724-43ba-92b9-d4a52ca4cf85-kube-api-access-hz7dz\") pod \"rabbitmq-cluster-operator-manager-668c99d594-khvmz\" (UID: \"b0693604-a724-43ba-92b9-d4a52ca4cf85\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-khvmz" Jan 27 20:21:19 crc kubenswrapper[4793]: I0127 20:21:19.214021 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hz7dz\" (UniqueName: \"kubernetes.io/projected/b0693604-a724-43ba-92b9-d4a52ca4cf85-kube-api-access-hz7dz\") pod \"rabbitmq-cluster-operator-manager-668c99d594-khvmz\" (UID: \"b0693604-a724-43ba-92b9-d4a52ca4cf85\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-khvmz" Jan 27 20:21:19 crc kubenswrapper[4793]: I0127 20:21:19.223622 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-khvmz" Jan 27 20:21:19 crc kubenswrapper[4793]: I0127 20:21:19.263045 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-jn9j9" event={"ID":"42bc0dc3-e9b2-4edc-865a-4d301956ec59","Type":"ContainerStarted","Data":"7b6df3233c04f81b642fc387234cc1737d89def3959eaca3bda0ac88d9f38dc0"} Jan 27 20:21:19 crc kubenswrapper[4793]: I0127 20:21:19.270033 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qt685" event={"ID":"01780455-12a9-41cc-80bb-71d643522796","Type":"ContainerStarted","Data":"d479f532b7298ee8f23feeb3584075b2c2dfe75cd11a8aed6b087708ccf65c94"} Jan 27 20:21:19 crc kubenswrapper[4793]: I0127 20:21:19.432175 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert\") pod \"infra-operator-controller-manager-694cf4f878-752f2\" (UID: \"742adff5-75e8-4941-9815-03bc77850cfa\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" Jan 27 20:21:19 crc kubenswrapper[4793]: E0127 20:21:19.432439 4793 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 27 20:21:19 crc kubenswrapper[4793]: E0127 20:21:19.432810 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert podName:742adff5-75e8-4941-9815-03bc77850cfa nodeName:}" failed. No retries permitted until 2026-01-27 20:21:23.432772444 +0000 UTC m=+1108.823025610 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert") pod "infra-operator-controller-manager-694cf4f878-752f2" (UID: "742adff5-75e8-4941-9815-03bc77850cfa") : secret "infra-operator-webhook-server-cert" not found Jan 27 20:21:19 crc kubenswrapper[4793]: I0127 20:21:19.771904 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85428jbw\" (UID: \"85aa101f-3371-46e6-84e5-83005bdb7799\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" Jan 27 20:21:19 crc kubenswrapper[4793]: E0127 20:21:19.772072 4793 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 20:21:19 crc kubenswrapper[4793]: E0127 20:21:19.772149 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert podName:85aa101f-3371-46e6-84e5-83005bdb7799 nodeName:}" failed. No retries permitted until 2026-01-27 20:21:23.772127615 +0000 UTC m=+1109.162380771 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" (UID: "85aa101f-3371-46e6-84e5-83005bdb7799") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 20:21:20 crc kubenswrapper[4793]: I0127 20:21:20.387856 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:20 crc kubenswrapper[4793]: I0127 20:21:20.387970 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:20 crc kubenswrapper[4793]: E0127 20:21:20.388149 4793 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 27 20:21:20 crc kubenswrapper[4793]: E0127 20:21:20.388192 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs podName:68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b nodeName:}" failed. No retries permitted until 2026-01-27 20:21:24.388178335 +0000 UTC m=+1109.778431491 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs") pod "openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" (UID: "68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b") : secret "metrics-server-cert" not found Jan 27 20:21:20 crc kubenswrapper[4793]: E0127 20:21:20.390824 4793 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 27 20:21:20 crc kubenswrapper[4793]: E0127 20:21:20.391166 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs podName:68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b nodeName:}" failed. No retries permitted until 2026-01-27 20:21:24.391144238 +0000 UTC m=+1109.781397434 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs") pod "openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" (UID: "68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b") : secret "webhook-server-cert" not found Jan 27 20:21:21 crc kubenswrapper[4793]: I0127 20:21:21.515237 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g4rnc"] Jan 27 20:21:21 crc kubenswrapper[4793]: W0127 20:21:21.637719 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4bd1bccc_c605_43f0_a9ed_1f82139a6f16.slice/crio-e8c127e547cc1413ef1d7e9b8fa08aff3af601e928129e111a85843af1609a3e WatchSource:0}: Error finding container e8c127e547cc1413ef1d7e9b8fa08aff3af601e928129e111a85843af1609a3e: Status 404 returned error can't find the container with id e8c127e547cc1413ef1d7e9b8fa08aff3af601e928129e111a85843af1609a3e Jan 27 20:21:21 crc kubenswrapper[4793]: I0127 20:21:21.654507 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-795xn"] Jan 27 20:21:21 crc kubenswrapper[4793]: I0127 20:21:21.967820 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-pddnh"] Jan 27 20:21:21 crc kubenswrapper[4793]: I0127 20:21:21.982648 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-crv46"] Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.001563 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-k9gch"] Jan 27 20:21:22 crc kubenswrapper[4793]: W0127 20:21:22.004800 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4105e86a_92bd_45a9_869f_310d634a514c.slice/crio-ba8e76b3431973b8cb7c440b932cc348068727d39bbeebceee29363b9a902940 WatchSource:0}: Error finding container ba8e76b3431973b8cb7c440b932cc348068727d39bbeebceee29363b9a902940: Status 404 returned error can't find the container with id ba8e76b3431973b8cb7c440b932cc348068727d39bbeebceee29363b9a902940 Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.032132 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-gh2bm"] Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.040156 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-598f7747c9-h2w8c"] Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.049323 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-m5bwb"] Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.054123 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-824xz"] Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.065676 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5f4cd88d46-t5lwz"] Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.079381 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-79d5ccc684-wqcbj"] Jan 27 
20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.084531 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5895dd5db-pkckf"] Jan 27 20:21:22 crc kubenswrapper[4793]: W0127 20:21:22.084568 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podff2db85c_6bbf_45c8_b27b_93b2cff54130.slice/crio-2389836aec50b628feaf7a942b7ce3c0c20f65c46b98990a7ad4f2aa6d8a306b WatchSource:0}: Error finding container 2389836aec50b628feaf7a942b7ce3c0c20f65c46b98990a7ad4f2aa6d8a306b: Status 404 returned error can't find the container with id 2389836aec50b628feaf7a942b7ce3c0c20f65c46b98990a7ad4f2aa6d8a306b Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.171912 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z"] Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.180004 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv"] Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.180901 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:816d474f502d730d6a2522a272b0e09a2d579ac63617817655d60c54bda4191e,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nn4jf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
neutron-operator-controller-manager-78d58447c5-mfx2z_openstack-operators(a227b0fb-079f-4d81-9f3e-380202824892): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.182507 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z" podUID="a227b0fb-079f-4d81-9f3e-380202824892" Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.193663 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-7bdb645866-clx4z"] Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.193728 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6f75f45d54-n85zq"] Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.208950 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-khvmz"] Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.210716 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5nvcq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
swift-operator-controller-manager-547cbdb99f-tqmbv_openstack-operators(71401df0-2118-4561-9d06-e311458f5357): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.211832 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv" podUID="71401df0-2118-4561-9d06-e311458f5357" Jan 27 20:21:22 crc kubenswrapper[4793]: W0127 20:21:22.217420 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3d4f2efb_7e15_4d10_ad22_a22b7ef0eb67.slice/crio-8089d9dd3682a418edf7e764be2b317a9b9811747b92f73df6eb3083adee8d18 WatchSource:0}: Error finding container 8089d9dd3682a418edf7e764be2b317a9b9811747b92f73df6eb3083adee8d18: Status 404 returned error can't find the container with id 8089d9dd3682a418edf7e764be2b317a9b9811747b92f73df6eb3083adee8d18 Jan 27 20:21:22 crc kubenswrapper[4793]: W0127 20:21:22.228042 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1da2285e_00b1_4b86_993c_47be2c441dc0.slice/crio-7ab66866fe52bab8af368a28d14079860b124f315007b14e021518db2a3949be WatchSource:0}: Error finding container 7ab66866fe52bab8af368a28d14079860b124f315007b14e021518db2a3949be: Status 404 returned error can't find the container with id 7ab66866fe52bab8af368a28d14079860b124f315007b14e021518db2a3949be Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.230612 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:8abfbec47f0119a6c22c61a0ff80a4b1c6c14439a327bc75d4c529c5d8f59658,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5wlbc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-7bdb645866-clx4z_openstack-operators(3d4f2efb-7e15-4d10-ad22-a22b7ef0eb67): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.234066 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-clx4z" podUID="3d4f2efb-7e15-4d10-ad22-a22b7ef0eb67" Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.236362 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hz7dz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-khvmz_openstack-operators(b0693604-a724-43ba-92b9-d4a52ca4cf85): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.237272 4793 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-q2ft5"] Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.237927 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-khvmz" podUID="b0693604-a724-43ba-92b9-d4a52ca4cf85" Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.239412 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:fa46fc14710961e6b4a76a3522dca3aa3cfa71436c7cf7ade533d3712822f327,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jj4xp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-6f75f45d54-n85zq_openstack-operators(1da2285e-00b1-4b86-993c-47be2c441dc0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.240821 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-n85zq" podUID="1da2285e-00b1-4b86-993c-47be2c441dc0" Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.256811 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qx4hm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-85cd9769bb-q2ft5_openstack-operators(f6cc0f9d-084e-46a3-a2b9-691474c16005): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.258373 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-q2ft5" podUID="f6cc0f9d-084e-46a3-a2b9-691474c16005" Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.482630 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g4rnc" event={"ID":"4bd1bccc-c605-43f0-a9ed-1f82139a6f16","Type":"ContainerStarted","Data":"e8c127e547cc1413ef1d7e9b8fa08aff3af601e928129e111a85843af1609a3e"} Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.484161 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-q2ft5" event={"ID":"f6cc0f9d-084e-46a3-a2b9-691474c16005","Type":"ContainerStarted","Data":"54b03f0a8066d01767406bf241d16e1ce89558406811a73b7b504baa86d65e93"} Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.486037 4793 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-q2ft5" podUID="f6cc0f9d-084e-46a3-a2b9-691474c16005" Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.487260 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-gh2bm" event={"ID":"40d57f5e-8c71-4278-b169-c1c439c8fe4a","Type":"ContainerStarted","Data":"6cd9147a6b6eb61d40e519b0dfc07885339b865a8820363cba69559b14e22e68"} Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.489475 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv" event={"ID":"71401df0-2118-4561-9d06-e311458f5357","Type":"ContainerStarted","Data":"86461beb4c3ccd42fdcbcf3235b5c3c279fca77132fa4b59392e5f20cdb5f5f3"} Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.491410 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-khvmz" event={"ID":"b0693604-a724-43ba-92b9-d4a52ca4cf85","Type":"ContainerStarted","Data":"867fc1af821231abcb5af6df2ffb027e0507ff48899fa0d57a3baa397e9655dc"} Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.491537 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922\\\"\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv" podUID="71401df0-2118-4561-9d06-e311458f5357" Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.492869 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-crv46" event={"ID":"44104a6a-d41a-4b4c-b119-1886c9b48a8b","Type":"ContainerStarted","Data":"fab818ca49b6270fa0311e7e8cbcb62098f89247c9e2caf2dfa2c1fe5ee3d43d"} Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.494183 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-824xz" event={"ID":"a78a18e3-25af-470d-8d1a-3bbc5b936703","Type":"ContainerStarted","Data":"73b5e4e8fc58d75540159dc5e621c375186920cf78b09af125d4b4901a848074"} Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.494836 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-khvmz" podUID="b0693604-a724-43ba-92b9-d4a52ca4cf85" Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.497098 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-clx4z" event={"ID":"3d4f2efb-7e15-4d10-ad22-a22b7ef0eb67","Type":"ContainerStarted","Data":"8089d9dd3682a418edf7e764be2b317a9b9811747b92f73df6eb3083adee8d18"} Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.509758 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:8abfbec47f0119a6c22c61a0ff80a4b1c6c14439a327bc75d4c529c5d8f59658\\\"\"" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-clx4z" podUID="3d4f2efb-7e15-4d10-ad22-a22b7ef0eb67" Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.510358 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5895dd5db-pkckf" event={"ID":"32f012e5-4f42-4aaf-bc4a-25ad68296efc","Type":"ContainerStarted","Data":"6c1a8f9f2fe8114c58a81eb5cb09e6973f3385732585a9913313d08a68a00697"} Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.512983 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-t5lwz" event={"ID":"ff2db85c-6bbf-45c8-b27b-93b2cff54130","Type":"ContainerStarted","Data":"2389836aec50b628feaf7a942b7ce3c0c20f65c46b98990a7ad4f2aa6d8a306b"} Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.515089 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-m5bwb" event={"ID":"ddb0bf87-c13a-48d3-9fa7-95c891f6c057","Type":"ContainerStarted","Data":"af4755e92157378756b3b765df12b92324af34461c5cb0b72728800f6fc16a3c"} Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.520942 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-h2w8c" event={"ID":"81ead9a2-beaa-4651-a9ee-aaeda0acd7c3","Type":"ContainerStarted","Data":"d13f6a5f2120e331bbbc975af5ba915274e40dc45537f6d2439437092517cf71"} Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.527762 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z" event={"ID":"a227b0fb-079f-4d81-9f3e-380202824892","Type":"ContainerStarted","Data":"d69d9a5203d4e6e2fe71b111aaab281546fa694fb9a21db17830e14c9edb4f41"} Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.530168 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:816d474f502d730d6a2522a272b0e09a2d579ac63617817655d60c54bda4191e\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z" podUID="a227b0fb-079f-4d81-9f3e-380202824892" Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.539962 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-k9gch" event={"ID":"cb13d95b-3b92-49bc-9bcd-af7d24b09869","Type":"ContainerStarted","Data":"bb8816fcf65cbd789bc7e08ec2be1edc43d1f74350029cf9b1fa53866932d2f3"} Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.541135 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-wqcbj" event={"ID":"d0f1b177-1cdf-45ce-a4f3-5cb9a4a3610d","Type":"ContainerStarted","Data":"ded6deee7b63c11a892d31202c51cd7d3d0b16a8562988c7514ed638879e9bb9"} Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.541835 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-n85zq" 
event={"ID":"1da2285e-00b1-4b86-993c-47be2c441dc0","Type":"ContainerStarted","Data":"7ab66866fe52bab8af368a28d14079860b124f315007b14e021518db2a3949be"} Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.548435 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-pddnh" event={"ID":"4105e86a-92bd-45a9-869f-310d634a514c","Type":"ContainerStarted","Data":"ba8e76b3431973b8cb7c440b932cc348068727d39bbeebceee29363b9a902940"} Jan 27 20:21:22 crc kubenswrapper[4793]: E0127 20:21:22.548964 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:fa46fc14710961e6b4a76a3522dca3aa3cfa71436c7cf7ade533d3712822f327\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-n85zq" podUID="1da2285e-00b1-4b86-993c-47be2c441dc0" Jan 27 20:21:22 crc kubenswrapper[4793]: I0127 20:21:22.553404 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-795xn" event={"ID":"744f20ad-f891-4948-8a8e-e0757333b75b","Type":"ContainerStarted","Data":"a547b97a77d476b0a02d122e0411347a661afc7ba8f66890709a3053e095fc76"} Jan 27 20:21:23 crc kubenswrapper[4793]: I0127 20:21:23.436636 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert\") pod \"infra-operator-controller-manager-694cf4f878-752f2\" (UID: \"742adff5-75e8-4941-9815-03bc77850cfa\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" Jan 27 20:21:23 crc kubenswrapper[4793]: E0127 20:21:23.436789 4793 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 27 20:21:23 crc kubenswrapper[4793]: E0127 20:21:23.436842 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert podName:742adff5-75e8-4941-9815-03bc77850cfa nodeName:}" failed. No retries permitted until 2026-01-27 20:21:31.436827684 +0000 UTC m=+1116.827080840 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert") pod "infra-operator-controller-manager-694cf4f878-752f2" (UID: "742adff5-75e8-4941-9815-03bc77850cfa") : secret "infra-operator-webhook-server-cert" not found Jan 27 20:21:23 crc kubenswrapper[4793]: E0127 20:21:23.567467 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-q2ft5" podUID="f6cc0f9d-084e-46a3-a2b9-691474c16005" Jan 27 20:21:23 crc kubenswrapper[4793]: E0127 20:21:23.568700 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-khvmz" podUID="b0693604-a724-43ba-92b9-d4a52ca4cf85" Jan 27 20:21:23 crc kubenswrapper[4793]: E0127 20:21:23.568738 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:8abfbec47f0119a6c22c61a0ff80a4b1c6c14439a327bc75d4c529c5d8f59658\\\"\"" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-clx4z" podUID="3d4f2efb-7e15-4d10-ad22-a22b7ef0eb67" Jan 27 20:21:23 crc kubenswrapper[4793]: E0127 20:21:23.568758 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922\\\"\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv" podUID="71401df0-2118-4561-9d06-e311458f5357" Jan 27 20:21:23 crc kubenswrapper[4793]: E0127 20:21:23.569338 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:fa46fc14710961e6b4a76a3522dca3aa3cfa71436c7cf7ade533d3712822f327\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-n85zq" podUID="1da2285e-00b1-4b86-993c-47be2c441dc0" Jan 27 20:21:23 crc kubenswrapper[4793]: E0127 20:21:23.569995 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:816d474f502d730d6a2522a272b0e09a2d579ac63617817655d60c54bda4191e\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z" podUID="a227b0fb-079f-4d81-9f3e-380202824892" Jan 27 20:21:23 crc kubenswrapper[4793]: I0127 20:21:23.846732 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85428jbw\" (UID: \"85aa101f-3371-46e6-84e5-83005bdb7799\") " 
pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" Jan 27 20:21:23 crc kubenswrapper[4793]: E0127 20:21:23.846904 4793 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 20:21:23 crc kubenswrapper[4793]: E0127 20:21:23.846950 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert podName:85aa101f-3371-46e6-84e5-83005bdb7799 nodeName:}" failed. No retries permitted until 2026-01-27 20:21:31.846935109 +0000 UTC m=+1117.237188255 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" (UID: "85aa101f-3371-46e6-84e5-83005bdb7799") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 20:21:24 crc kubenswrapper[4793]: I0127 20:21:24.459373 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:24 crc kubenswrapper[4793]: E0127 20:21:24.459625 4793 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 27 20:21:24 crc kubenswrapper[4793]: E0127 20:21:24.459921 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs podName:68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b nodeName:}" failed. No retries permitted until 2026-01-27 20:21:32.459900962 +0000 UTC m=+1117.850154188 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs") pod "openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" (UID: "68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b") : secret "webhook-server-cert" not found Jan 27 20:21:24 crc kubenswrapper[4793]: I0127 20:21:24.459820 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:24 crc kubenswrapper[4793]: E0127 20:21:24.459974 4793 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 27 20:21:24 crc kubenswrapper[4793]: E0127 20:21:24.460058 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs podName:68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b nodeName:}" failed. No retries permitted until 2026-01-27 20:21:32.460024775 +0000 UTC m=+1117.850278001 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs") pod "openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" (UID: "68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b") : secret "metrics-server-cert" not found Jan 27 20:21:31 crc kubenswrapper[4793]: I0127 20:21:31.455399 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert\") pod \"infra-operator-controller-manager-694cf4f878-752f2\" (UID: \"742adff5-75e8-4941-9815-03bc77850cfa\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" Jan 27 20:21:31 crc kubenswrapper[4793]: I0127 20:21:31.462036 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/742adff5-75e8-4941-9815-03bc77850cfa-cert\") pod \"infra-operator-controller-manager-694cf4f878-752f2\" (UID: \"742adff5-75e8-4941-9815-03bc77850cfa\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" Jan 27 20:21:31 crc kubenswrapper[4793]: I0127 20:21:31.709987 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" Jan 27 20:21:31 crc kubenswrapper[4793]: I0127 20:21:31.862245 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85428jbw\" (UID: \"85aa101f-3371-46e6-84e5-83005bdb7799\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" Jan 27 20:21:31 crc kubenswrapper[4793]: I0127 20:21:31.885214 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85aa101f-3371-46e6-84e5-83005bdb7799-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85428jbw\" (UID: \"85aa101f-3371-46e6-84e5-83005bdb7799\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" Jan 27 20:21:32 crc kubenswrapper[4793]: I0127 20:21:32.054966 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" Jan 27 20:21:32 crc kubenswrapper[4793]: I0127 20:21:32.469460 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:32 crc kubenswrapper[4793]: I0127 20:21:32.469595 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:32 crc kubenswrapper[4793]: E0127 20:21:32.469649 4793 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 27 20:21:32 crc kubenswrapper[4793]: E0127 20:21:32.469730 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs podName:68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b nodeName:}" failed. No retries permitted until 2026-01-27 20:21:48.469711684 +0000 UTC m=+1133.859964840 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs") pod "openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" (UID: "68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b") : secret "metrics-server-cert" not found Jan 27 20:21:32 crc kubenswrapper[4793]: E0127 20:21:32.469744 4793 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 27 20:21:32 crc kubenswrapper[4793]: E0127 20:21:32.469807 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs podName:68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b nodeName:}" failed. No retries permitted until 2026-01-27 20:21:48.469790035 +0000 UTC m=+1133.860043251 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs") pod "openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" (UID: "68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b") : secret "webhook-server-cert" not found Jan 27 20:21:35 crc kubenswrapper[4793]: E0127 20:21:35.541005 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:3311e627bcb860d9443592a2c67078417318c9eb77d8ef4d07f9aa7027d46822" Jan 27 20:21:35 crc kubenswrapper[4793]: E0127 20:21:35.541479 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:3311e627bcb860d9443592a2c67078417318c9eb77d8ef4d07f9aa7027d46822,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cxkmj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-77d5c5b54f-g4rnc_openstack-operators(4bd1bccc-c605-43f0-a9ed-1f82139a6f16): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:21:35 crc kubenswrapper[4793]: E0127 20:21:35.543018 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g4rnc" 
podUID="4bd1bccc-c605-43f0-a9ed-1f82139a6f16" Jan 27 20:21:36 crc kubenswrapper[4793]: E0127 20:21:36.661618 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:3311e627bcb860d9443592a2c67078417318c9eb77d8ef4d07f9aa7027d46822\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g4rnc" podUID="4bd1bccc-c605-43f0-a9ed-1f82139a6f16" Jan 27 20:21:36 crc kubenswrapper[4793]: E0127 20:21:36.940003 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:9caae9b3ee328df678baa26454e45e47693acdadb27f9c635680597aaec43337" Jan 27 20:21:36 crc kubenswrapper[4793]: E0127 20:21:36.940317 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:9caae9b3ee328df678baa26454e45e47693acdadb27f9c635680597aaec43337,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-p66rq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-78fdd796fd-crv46_openstack-operators(44104a6a-d41a-4b4c-b119-1886c9b48a8b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:21:36 crc kubenswrapper[4793]: E0127 20:21:36.942271 4793 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-crv46" podUID="44104a6a-d41a-4b4c-b119-1886c9b48a8b" Jan 27 20:21:37 crc kubenswrapper[4793]: E0127 20:21:37.822123 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:9caae9b3ee328df678baa26454e45e47693acdadb27f9c635680597aaec43337\\\"\"" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-crv46" podUID="44104a6a-d41a-4b4c-b119-1886c9b48a8b" Jan 27 20:21:46 crc kubenswrapper[4793]: E0127 20:21:46.014914 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:4d55bd6418df3f63f4d3fe47bebf3f5498a520b3e14af98fe16c85ef9fd54d5e" Jan 27 20:21:46 crc kubenswrapper[4793]: E0127 20:21:46.015660 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:4d55bd6418df3f63f4d3fe47bebf3f5498a520b3e14af98fe16c85ef9fd54d5e,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mzjbm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-598f7747c9-h2w8c_openstack-operators(81ead9a2-beaa-4651-a9ee-aaeda0acd7c3): 
ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:21:46 crc kubenswrapper[4793]: E0127 20:21:46.017743 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-h2w8c" podUID="81ead9a2-beaa-4651-a9ee-aaeda0acd7c3" Jan 27 20:21:46 crc kubenswrapper[4793]: E0127 20:21:46.686093 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:4d55bd6418df3f63f4d3fe47bebf3f5498a520b3e14af98fe16c85ef9fd54d5e\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-h2w8c" podUID="81ead9a2-beaa-4651-a9ee-aaeda0acd7c3" Jan 27 20:21:48 crc kubenswrapper[4793]: I0127 20:21:48.499383 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:48 crc kubenswrapper[4793]: I0127 20:21:48.500019 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:48 crc kubenswrapper[4793]: I0127 20:21:48.508157 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-metrics-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:48 crc kubenswrapper[4793]: I0127 20:21:48.516595 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b-webhook-certs\") pod \"openstack-operator-controller-manager-5b4bbd6fc8-fhjlh\" (UID: \"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b\") " pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:48 crc kubenswrapper[4793]: I0127 20:21:48.665035 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:21:50 crc kubenswrapper[4793]: E0127 20:21:50.462317 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:013c0ad82d21a21c7eece5cd4b5d5c4b8eb410b6671ac33a6f3fb78c8510811d" Jan 27 20:21:50 crc kubenswrapper[4793]: E0127 20:21:50.462849 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:013c0ad82d21a21c7eece5cd4b5d5c4b8eb410b6671ac33a6f3fb78c8510811d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5mzjf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-79d5ccc684-wqcbj_openstack-operators(d0f1b177-1cdf-45ce-a4f3-5cb9a4a3610d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:21:50 crc kubenswrapper[4793]: E0127 20:21:50.464067 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-wqcbj" podUID="d0f1b177-1cdf-45ce-a4f3-5cb9a4a3610d" Jan 27 20:21:50 crc kubenswrapper[4793]: E0127 20:21:50.716458 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:013c0ad82d21a21c7eece5cd4b5d5c4b8eb410b6671ac33a6f3fb78c8510811d\\\"\"" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-wqcbj" podUID="d0f1b177-1cdf-45ce-a4f3-5cb9a4a3610d" Jan 27 20:21:54 crc kubenswrapper[4793]: E0127 20:21:54.479512 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:8bee4480babd6fd8f686e0ba52a304acb6ffb90f09c7c57e7f5df5f7658836d8" Jan 27 20:21:54 crc kubenswrapper[4793]: E0127 20:21:54.480283 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:8bee4480babd6fd8f686e0ba52a304acb6ffb90f09c7c57e7f5df5f7658836d8,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hnqb4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-78c6999f6f-m5bwb_openstack-operators(ddb0bf87-c13a-48d3-9fa7-95c891f6c057): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:21:54 crc kubenswrapper[4793]: E0127 20:21:54.481758 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-m5bwb" podUID="ddb0bf87-c13a-48d3-9fa7-95c891f6c057" Jan 27 20:21:54 crc kubenswrapper[4793]: E0127 20:21:54.747097 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:8bee4480babd6fd8f686e0ba52a304acb6ffb90f09c7c57e7f5df5f7658836d8\\\"\"" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-m5bwb" podUID="ddb0bf87-c13a-48d3-9fa7-95c891f6c057" Jan 27 20:21:55 crc kubenswrapper[4793]: E0127 20:21:55.798105 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:6c88312afa9673f7b72c558368034d7a488ead73080cdcdf581fe85b99263ece" Jan 27 20:21:55 crc kubenswrapper[4793]: E0127 20:21:55.798283 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:6c88312afa9673f7b72c558368034d7a488ead73080cdcdf581fe85b99263ece,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-n9wcn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-b45d7bf98-pddnh_openstack-operators(4105e86a-92bd-45a9-869f-310d634a514c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:21:55 crc 
kubenswrapper[4793]: E0127 20:21:55.800245 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-pddnh" podUID="4105e86a-92bd-45a9-869f-310d634a514c" Jan 27 20:21:56 crc kubenswrapper[4793]: E0127 20:21:56.581598 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d" Jan 27 20:21:56 crc kubenswrapper[4793]: E0127 20:21:56.581822 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fnlw5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-69797bbcbd-gh2bm_openstack-operators(40d57f5e-8c71-4278-b169-c1c439c8fe4a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:21:56 crc kubenswrapper[4793]: E0127 20:21:56.582989 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack-operators/test-operator-controller-manager-69797bbcbd-gh2bm" podUID="40d57f5e-8c71-4278-b169-c1c439c8fe4a" Jan 27 20:21:56 crc kubenswrapper[4793]: E0127 20:21:56.650688 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/openstack-k8s-operators/watcher-operator:add353f857c04debbf620f926c6c19f4f45c7f75" Jan 27 20:21:56 crc kubenswrapper[4793]: E0127 20:21:56.651047 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/openstack-k8s-operators/watcher-operator:add353f857c04debbf620f926c6c19f4f45c7f75" Jan 27 20:21:56 crc kubenswrapper[4793]: E0127 20:21:56.651358 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.195:5001/openstack-k8s-operators/watcher-operator:add353f857c04debbf620f926c6c19f4f45c7f75,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vnnf8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-5895dd5db-pkckf_openstack-operators(32f012e5-4f42-4aaf-bc4a-25ad68296efc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:21:56 crc kubenswrapper[4793]: E0127 20:21:56.652593 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack-operators/watcher-operator-controller-manager-5895dd5db-pkckf" podUID="32f012e5-4f42-4aaf-bc4a-25ad68296efc" Jan 27 20:21:56 crc kubenswrapper[4793]: E0127 20:21:56.819443 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/designate-operator@sha256:6c88312afa9673f7b72c558368034d7a488ead73080cdcdf581fe85b99263ece\\\"\"" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-pddnh" podUID="4105e86a-92bd-45a9-869f-310d634a514c" Jan 27 20:21:56 crc kubenswrapper[4793]: E0127 20:21:56.819472 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.195:5001/openstack-k8s-operators/watcher-operator:add353f857c04debbf620f926c6c19f4f45c7f75\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5895dd5db-pkckf" podUID="32f012e5-4f42-4aaf-bc4a-25ad68296efc" Jan 27 20:21:56 crc kubenswrapper[4793]: E0127 20:21:56.819472 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d\\\"\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-gh2bm" podUID="40d57f5e-8c71-4278-b169-c1c439c8fe4a" Jan 27 20:21:57 crc kubenswrapper[4793]: E0127 20:21:57.505838 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349" Jan 27 20:21:57 crc kubenswrapper[4793]: E0127 20:21:57.506009 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ql9kv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-b8b6d4659-795xn_openstack-operators(744f20ad-f891-4948-8a8e-e0757333b75b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:21:57 crc kubenswrapper[4793]: E0127 20:21:57.507211 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-795xn" podUID="744f20ad-f891-4948-8a8e-e0757333b75b" Jan 27 20:21:57 crc kubenswrapper[4793]: E0127 20:21:57.767637 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-795xn" podUID="744f20ad-f891-4948-8a8e-e0757333b75b" Jan 27 20:21:58 crc kubenswrapper[4793]: E0127 20:21:58.447929 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:ed489f21a0c72557d2da5a271808f19b7c7b85ef32fd9f4aa91bdbfc5bca3bdd" Jan 27 20:21:58 crc kubenswrapper[4793]: E0127 20:21:58.448200 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:ed489f21a0c72557d2da5a271808f19b7c7b85ef32fd9f4aa91bdbfc5bca3bdd,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tsbmj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-5f4cd88d46-t5lwz_openstack-operators(ff2db85c-6bbf-45c8-b27b-93b2cff54130): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:21:58 crc kubenswrapper[4793]: E0127 20:21:58.449570 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-t5lwz" podUID="ff2db85c-6bbf-45c8-b27b-93b2cff54130" Jan 27 20:21:58 crc kubenswrapper[4793]: E0127 20:21:58.775872 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ed489f21a0c72557d2da5a271808f19b7c7b85ef32fd9f4aa91bdbfc5bca3bdd\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-t5lwz" podUID="ff2db85c-6bbf-45c8-b27b-93b2cff54130" Jan 27 20:22:00 crc kubenswrapper[4793]: E0127 20:22:00.570110 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922" Jan 27 20:22:00 crc kubenswrapper[4793]: E0127 20:22:00.570571 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5nvcq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-547cbdb99f-tqmbv_openstack-operators(71401df0-2118-4561-9d06-e311458f5357): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:22:00 crc kubenswrapper[4793]: E0127 20:22:00.571848 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv" podUID="71401df0-2118-4561-9d06-e311458f5357" Jan 27 20:22:04 crc kubenswrapper[4793]: E0127 20:22:04.859961 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:816d474f502d730d6a2522a272b0e09a2d579ac63617817655d60c54bda4191e" Jan 27 20:22:04 crc kubenswrapper[4793]: E0127 20:22:04.860686 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:816d474f502d730d6a2522a272b0e09a2d579ac63617817655d60c54bda4191e,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nn4jf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-78d58447c5-mfx2z_openstack-operators(a227b0fb-079f-4d81-9f3e-380202824892): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:22:04 crc kubenswrapper[4793]: E0127 20:22:04.861923 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z" podUID="a227b0fb-079f-4d81-9f3e-380202824892" Jan 27 20:22:05 crc kubenswrapper[4793]: I0127 20:22:05.421905 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw"] Jan 27 20:22:06 crc kubenswrapper[4793]: E0127 20:22:06.183006 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Jan 27 20:22:06 crc kubenswrapper[4793]: E0127 20:22:06.183176 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hz7dz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-khvmz_openstack-operators(b0693604-a724-43ba-92b9-d4a52ca4cf85): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:22:06 crc kubenswrapper[4793]: E0127 20:22:06.184325 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-khvmz" podUID="b0693604-a724-43ba-92b9-d4a52ca4cf85" Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.091920 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-824xz" event={"ID":"a78a18e3-25af-470d-8d1a-3bbc5b936703","Type":"ContainerStarted","Data":"c3424b794828087a45447e6be368f6446d280ec5f6f6acacf821c2aff2211afd"} Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.092816 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-824xz" Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.096756 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-clx4z" event={"ID":"3d4f2efb-7e15-4d10-ad22-a22b7ef0eb67","Type":"ContainerStarted","Data":"1c9d0eb3ed75c3ac9be8631c8c43c686f835f9d7c56eac208e866263ffa57ddb"} Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.097059 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/nova-operator-controller-manager-7bdb645866-clx4z" Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.105243 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" event={"ID":"85aa101f-3371-46e6-84e5-83005bdb7799","Type":"ContainerStarted","Data":"fbaecc989f8dd7ff20dd7140a3719fb2fc7de6b7ec11c2540ba44ea74b5febb1"} Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.109304 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-k9gch" event={"ID":"cb13d95b-3b92-49bc-9bcd-af7d24b09869","Type":"ContainerStarted","Data":"75e8dd66f4bf4346f0d6f453be157fb15551e2ee2f469421228e5e85c37dbe56"} Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.110576 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-k9gch" Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.113034 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qt685" event={"ID":"01780455-12a9-41cc-80bb-71d643522796","Type":"ContainerStarted","Data":"89b6fc5b259ee5aa88374bbfbf96b47d370ad4ee6da9471965cbf08289d98227"} Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.113721 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qt685" Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.122115 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g4rnc" event={"ID":"4bd1bccc-c605-43f0-a9ed-1f82139a6f16","Type":"ContainerStarted","Data":"c3ebcf9db5da01b8e054d5bb55546b9381f00482d8ef62552ac4768576e47ab4"} Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.123231 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g4rnc" Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.127685 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-824xz" podStartSLOduration=15.87018885 podStartE2EDuration="52.127656254s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.07791486 +0000 UTC m=+1107.468168016" lastFinishedPulling="2026-01-27 20:21:58.335382264 +0000 UTC m=+1143.725635420" observedRunningTime="2026-01-27 20:22:07.121319479 +0000 UTC m=+1152.511572645" watchObservedRunningTime="2026-01-27 20:22:07.127656254 +0000 UTC m=+1152.517909410" Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.129004 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-q2ft5" event={"ID":"f6cc0f9d-084e-46a3-a2b9-691474c16005","Type":"ContainerStarted","Data":"f6642ef231930aec9c2967818f62d87f4731a2a55112abdde8e81357467d20a2"} Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.130172 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-q2ft5" Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.216659 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qt685" 
podStartSLOduration=14.142514932 podStartE2EDuration="52.216642818s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:18.686164898 +0000 UTC m=+1104.076418054" lastFinishedPulling="2026-01-27 20:21:56.760292784 +0000 UTC m=+1142.150545940" observedRunningTime="2026-01-27 20:22:07.215166071 +0000 UTC m=+1152.605419227" watchObservedRunningTime="2026-01-27 20:22:07.216642818 +0000 UTC m=+1152.606895984" Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.236962 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-clx4z" podStartSLOduration=8.331210441 podStartE2EDuration="52.236937847s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.227839404 +0000 UTC m=+1107.618092560" lastFinishedPulling="2026-01-27 20:22:06.1335668 +0000 UTC m=+1151.523819966" observedRunningTime="2026-01-27 20:22:07.17210966 +0000 UTC m=+1152.562362826" watchObservedRunningTime="2026-01-27 20:22:07.236937847 +0000 UTC m=+1152.627191003" Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.263201 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-694cf4f878-752f2"] Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.265618 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-k9gch" podStartSLOduration=15.985698897 podStartE2EDuration="52.265600984s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.055428126 +0000 UTC m=+1107.445681272" lastFinishedPulling="2026-01-27 20:21:58.335330203 +0000 UTC m=+1143.725583359" observedRunningTime="2026-01-27 20:22:07.253520886 +0000 UTC m=+1152.643774042" watchObservedRunningTime="2026-01-27 20:22:07.265600984 +0000 UTC m=+1152.655854140" Jan 27 20:22:07 crc kubenswrapper[4793]: W0127 20:22:07.271103 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod742adff5_75e8_4941_9815_03bc77850cfa.slice/crio-db808f29f903d32b261463ca304fb2f467da6e940c19c14451e6a3f724317c46 WatchSource:0}: Error finding container db808f29f903d32b261463ca304fb2f467da6e940c19c14451e6a3f724317c46: Status 404 returned error can't find the container with id db808f29f903d32b261463ca304fb2f467da6e940c19c14451e6a3f724317c46 Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.310587 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh"] Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.311030 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g4rnc" podStartSLOduration=7.841090295 podStartE2EDuration="52.310997652s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:21.664719939 +0000 UTC m=+1107.054973105" lastFinishedPulling="2026-01-27 20:22:06.134627306 +0000 UTC m=+1151.524880462" observedRunningTime="2026-01-27 20:22:07.27801641 +0000 UTC m=+1152.668269576" watchObservedRunningTime="2026-01-27 20:22:07.310997652 +0000 UTC m=+1152.701250818" Jan 27 20:22:07 crc kubenswrapper[4793]: I0127 20:22:07.316427 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-q2ft5" podStartSLOduration=8.304952555 podStartE2EDuration="52.316410265s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.256664705 +0000 UTC m=+1107.646917861" lastFinishedPulling="2026-01-27 20:22:06.268122415 +0000 UTC m=+1151.658375571" observedRunningTime="2026-01-27 20:22:07.305062256 +0000 UTC m=+1152.695315412" watchObservedRunningTime="2026-01-27 20:22:07.316410265 +0000 UTC m=+1152.706663421" Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.148400 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" event={"ID":"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b","Type":"ContainerStarted","Data":"5eedc7ea3c5e103a8b6cf65b6380206d4c7e4f95c624696ca93a07c341be95d6"} Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.148698 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" event={"ID":"68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b","Type":"ContainerStarted","Data":"a99c0a201ce4295a6857151e50dea3c2b576f051494e53e3738a69335ca31b06"} Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.148730 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.163373 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-crv46" event={"ID":"44104a6a-d41a-4b4c-b119-1886c9b48a8b","Type":"ContainerStarted","Data":"3b84e0bb25fa3df21f7c11cdd11d05609e3aaa4e3b37fdfcfa0e54cbbb92614a"} Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.164084 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-crv46" Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.165828 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" event={"ID":"742adff5-75e8-4941-9815-03bc77850cfa","Type":"ContainerStarted","Data":"db808f29f903d32b261463ca304fb2f467da6e940c19c14451e6a3f724317c46"} Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.445828 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-h2w8c" event={"ID":"81ead9a2-beaa-4651-a9ee-aaeda0acd7c3","Type":"ContainerStarted","Data":"b7322c2fe68e35f7f68444573528b924fbf20e5f432c75d28128473424b47a7c"} Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.446086 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-h2w8c" Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.449371 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh" podStartSLOduration=52.449349211 podStartE2EDuration="52.449349211s" podCreationTimestamp="2026-01-27 20:21:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:22:08.444781099 +0000 UTC m=+1153.835034255" watchObservedRunningTime="2026-01-27 20:22:08.449349211 +0000 UTC m=+1153.839602367" Jan 27 20:22:08 crc kubenswrapper[4793]: 
I0127 20:22:08.458845 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-jn9j9" event={"ID":"42bc0dc3-e9b2-4edc-865a-4d301956ec59","Type":"ContainerStarted","Data":"c6912b647ed7e949a0f6a076130012443d35a24d4e7b75d49cb27c69185fe8aa"} Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.459634 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-jn9j9" Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.473049 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-crv46" podStartSLOduration=9.370171421 podStartE2EDuration="53.473004394s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.030639955 +0000 UTC m=+1107.420893111" lastFinishedPulling="2026-01-27 20:22:06.133472918 +0000 UTC m=+1151.523726084" observedRunningTime="2026-01-27 20:22:08.464605767 +0000 UTC m=+1153.854858923" watchObservedRunningTime="2026-01-27 20:22:08.473004394 +0000 UTC m=+1153.863257550" Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.480581 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-wqcbj" event={"ID":"d0f1b177-1cdf-45ce-a4f3-5cb9a4a3610d","Type":"ContainerStarted","Data":"92c8d247d0fb18e8c2455f7ddb94d8a7235c0204cca3b83124efcccbe937c2e0"} Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.481389 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-wqcbj" Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.484404 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-n85zq" event={"ID":"1da2285e-00b1-4b86-993c-47be2c441dc0","Type":"ContainerStarted","Data":"fdcd58e3eed757378de693ee28dd47938c0204c4c786280e20103c05eae45017"} Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.484830 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-n85zq" Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.541840 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-h2w8c" podStartSLOduration=9.185554272 podStartE2EDuration="53.541816269s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.064059089 +0000 UTC m=+1107.454312245" lastFinishedPulling="2026-01-27 20:22:06.420321086 +0000 UTC m=+1151.810574242" observedRunningTime="2026-01-27 20:22:08.537422781 +0000 UTC m=+1153.927675937" watchObservedRunningTime="2026-01-27 20:22:08.541816269 +0000 UTC m=+1153.932069425" Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.542608 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-jn9j9" podStartSLOduration=15.403742487 podStartE2EDuration="53.542601329s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:18.682189629 +0000 UTC m=+1104.072442785" lastFinishedPulling="2026-01-27 20:21:56.821048461 +0000 UTC m=+1142.211301627" observedRunningTime="2026-01-27 20:22:08.512510157 +0000 UTC m=+1153.902763313" 
watchObservedRunningTime="2026-01-27 20:22:08.542601329 +0000 UTC m=+1153.932854485" Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.570567 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-n85zq" podStartSLOduration=9.676351816 podStartE2EDuration="53.570530767s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.239304457 +0000 UTC m=+1107.629557613" lastFinishedPulling="2026-01-27 20:22:06.133483408 +0000 UTC m=+1151.523736564" observedRunningTime="2026-01-27 20:22:08.563837702 +0000 UTC m=+1153.954090858" watchObservedRunningTime="2026-01-27 20:22:08.570530767 +0000 UTC m=+1153.960783923" Jan 27 20:22:08 crc kubenswrapper[4793]: I0127 20:22:08.591947 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-wqcbj" podStartSLOduration=9.27920785 podStartE2EDuration="53.591927404s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.106870794 +0000 UTC m=+1107.497123950" lastFinishedPulling="2026-01-27 20:22:06.419590348 +0000 UTC m=+1151.809843504" observedRunningTime="2026-01-27 20:22:08.587318071 +0000 UTC m=+1153.977571227" watchObservedRunningTime="2026-01-27 20:22:08.591927404 +0000 UTC m=+1153.982180560" Jan 27 20:22:10 crc kubenswrapper[4793]: I0127 20:22:10.611778 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5895dd5db-pkckf" event={"ID":"32f012e5-4f42-4aaf-bc4a-25ad68296efc","Type":"ContainerStarted","Data":"55368b9227241e8e9d270e8492271a3fcbb60cae45efe4665162bdf20567ce6c"} Jan 27 20:22:10 crc kubenswrapper[4793]: I0127 20:22:10.613078 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-5895dd5db-pkckf" Jan 27 20:22:10 crc kubenswrapper[4793]: I0127 20:22:10.617860 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-pddnh" event={"ID":"4105e86a-92bd-45a9-869f-310d634a514c","Type":"ContainerStarted","Data":"58d365ce3102c6a40f85bd7eeb88123d16553c91e23f9ca67b0b869a73ed5859"} Jan 27 20:22:10 crc kubenswrapper[4793]: I0127 20:22:10.620335 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-pddnh" Jan 27 20:22:10 crc kubenswrapper[4793]: I0127 20:22:10.625090 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-795xn" event={"ID":"744f20ad-f891-4948-8a8e-e0757333b75b","Type":"ContainerStarted","Data":"b1c63f745aaebc93dd145929d82a3774a8edcd66908852dfd57b654235df8345"} Jan 27 20:22:10 crc kubenswrapper[4793]: I0127 20:22:10.626977 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-795xn" Jan 27 20:22:10 crc kubenswrapper[4793]: I0127 20:22:10.667352 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-5895dd5db-pkckf" podStartSLOduration=8.852879086 podStartE2EDuration="55.667322802s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.126700163 +0000 UTC m=+1107.516953319" lastFinishedPulling="2026-01-27 20:22:08.941143879 
+0000 UTC m=+1154.331397035" observedRunningTime="2026-01-27 20:22:10.648116359 +0000 UTC m=+1156.038369545" watchObservedRunningTime="2026-01-27 20:22:10.667322802 +0000 UTC m=+1156.057575958" Jan 27 20:22:10 crc kubenswrapper[4793]: I0127 20:22:10.735253 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-795xn" podStartSLOduration=7.454545641 podStartE2EDuration="55.735222645s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:21.671310802 +0000 UTC m=+1107.061563958" lastFinishedPulling="2026-01-27 20:22:09.951987816 +0000 UTC m=+1155.342240962" observedRunningTime="2026-01-27 20:22:10.703644787 +0000 UTC m=+1156.093897943" watchObservedRunningTime="2026-01-27 20:22:10.735222645 +0000 UTC m=+1156.125475811" Jan 27 20:22:11 crc kubenswrapper[4793]: I0127 20:22:11.687979 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-m5bwb" event={"ID":"ddb0bf87-c13a-48d3-9fa7-95c891f6c057","Type":"ContainerStarted","Data":"8ac7c614a3feaf23ab5d405c03267e8590adc8f2c1e2ca18abf6a890968afeea"} Jan 27 20:22:11 crc kubenswrapper[4793]: I0127 20:22:11.689696 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-m5bwb" Jan 27 20:22:11 crc kubenswrapper[4793]: I0127 20:22:11.717149 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-m5bwb" podStartSLOduration=8.297159783 podStartE2EDuration="56.717124439s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.073718987 +0000 UTC m=+1107.463972143" lastFinishedPulling="2026-01-27 20:22:10.493683643 +0000 UTC m=+1155.883936799" observedRunningTime="2026-01-27 20:22:11.714292639 +0000 UTC m=+1157.104545805" watchObservedRunningTime="2026-01-27 20:22:11.717124439 +0000 UTC m=+1157.107377595" Jan 27 20:22:11 crc kubenswrapper[4793]: I0127 20:22:11.722673 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-pddnh" podStartSLOduration=10.158595808 podStartE2EDuration="56.722651025s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.030573584 +0000 UTC m=+1107.420826740" lastFinishedPulling="2026-01-27 20:22:08.594628801 +0000 UTC m=+1153.984881957" observedRunningTime="2026-01-27 20:22:10.739115641 +0000 UTC m=+1156.129368817" watchObservedRunningTime="2026-01-27 20:22:11.722651025 +0000 UTC m=+1157.112904181" Jan 27 20:22:13 crc kubenswrapper[4793]: I0127 20:22:13.767473 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-gh2bm" event={"ID":"40d57f5e-8c71-4278-b169-c1c439c8fe4a","Type":"ContainerStarted","Data":"5f120e8252e0db1505de999bdaaadd005740d4c31c35a6e27d335c45c22fa0fd"} Jan 27 20:22:13 crc kubenswrapper[4793]: I0127 20:22:13.768329 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-gh2bm" Jan 27 20:22:13 crc kubenswrapper[4793]: I0127 20:22:13.868295 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-gh2bm" podStartSLOduration=8.225969218 
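
Each pod_startup_latency_tracker line above reports two numbers whose relationship can be checked from the timestamps it prints: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is, to within clock rounding, that same interval minus the image-pull window (lastFinishedPulling minus firstStartedPulling), consistent with the Kubernetes pod-startup SLI excluding image pulls. A sketch verifying the arithmetic against the glance-operator entry above; the subtraction rule is inferred from these numbers, not taken from kubelet source:

    package main

    import (
        "fmt"
        "time"
    )

    // Timestamps copied from the glance-operator entry above; the layout is
    // Go's default time.Time formatting, which is what the log printed.
    const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

    func mustParse(s string) time.Time {
        t, err := time.Parse(layout, s)
        if err != nil {
            panic(err)
        }
        return t
    }

    func main() {
        created := mustParse("2026-01-27 20:21:15 +0000 UTC")
        firstPull := mustParse("2026-01-27 20:21:22.030639955 +0000 UTC")
        lastPull := mustParse("2026-01-27 20:22:06.133472918 +0000 UTC")
        watched := mustParse("2026-01-27 20:22:08.473004394 +0000 UTC")

        e2e := watched.Sub(created)          // 53.473004394s, the logged podStartE2EDuration
        slo := e2e - lastPull.Sub(firstPull) // ~9.37017s, the logged podStartSLOduration
        fmt.Println(e2e, slo)
    }
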
podStartE2EDuration="58.868278623s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.05437047 +0000 UTC m=+1107.444623616" lastFinishedPulling="2026-01-27 20:22:12.696679865 +0000 UTC m=+1158.086933021" observedRunningTime="2026-01-27 20:22:13.865486614 +0000 UTC m=+1159.255739770" watchObservedRunningTime="2026-01-27 20:22:13.868278623 +0000 UTC m=+1159.258531779" Jan 27 20:22:14 crc kubenswrapper[4793]: E0127 20:22:14.806056 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922\\\"\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv" podUID="71401df0-2118-4561-9d06-e311458f5357" Jan 27 20:22:15 crc kubenswrapper[4793]: I0127 20:22:15.656715 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qt685" Jan 27 20:22:15 crc kubenswrapper[4793]: I0127 20:22:15.705344 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-jn9j9" Jan 27 20:22:15 crc kubenswrapper[4793]: I0127 20:22:15.748081 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-crv46" Jan 27 20:22:15 crc kubenswrapper[4793]: I0127 20:22:15.754411 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-pddnh" Jan 27 20:22:15 crc kubenswrapper[4793]: I0127 20:22:15.856934 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-h2w8c" Jan 27 20:22:15 crc kubenswrapper[4793]: I0127 20:22:15.873431 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-795xn" Jan 27 20:22:16 crc kubenswrapper[4793]: I0127 20:22:16.145848 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-k9gch" Jan 27 20:22:16 crc kubenswrapper[4793]: I0127 20:22:16.151834 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-clx4z" Jan 27 20:22:16 crc kubenswrapper[4793]: I0127 20:22:16.193379 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-824xz" Jan 27 20:22:16 crc kubenswrapper[4793]: I0127 20:22:16.552913 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-m5bwb" Jan 27 20:22:16 crc kubenswrapper[4793]: E0127 20:22:16.807114 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-khvmz" podUID="b0693604-a724-43ba-92b9-d4a52ca4cf85" Jan 27 20:22:16 crc 
Jan 27 20:22:16 crc kubenswrapper[4793]: I0127 20:22:16.844449 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-q2ft5"
Jan 27 20:22:16 crc kubenswrapper[4793]: I0127 20:22:16.855097 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-5895dd5db-pkckf"
Jan 27 20:22:16 crc kubenswrapper[4793]: I0127 20:22:16.945063 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-wqcbj"
Jan 27 20:22:16 crc kubenswrapper[4793]: I0127 20:22:16.996740 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-n85zq"
Jan 27 20:22:18 crc kubenswrapper[4793]: E0127 20:22:18.842951 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:816d474f502d730d6a2522a272b0e09a2d579ac63617817655d60c54bda4191e\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z" podUID="a227b0fb-079f-4d81-9f3e-380202824892"
Jan 27 20:22:18 crc kubenswrapper[4793]: I0127 20:22:18.920650 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-5b4bbd6fc8-fhjlh"
Jan 27 20:22:21 crc kubenswrapper[4793]: I0127 20:22:21.152789 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" event={"ID":"85aa101f-3371-46e6-84e5-83005bdb7799","Type":"ContainerStarted","Data":"5b41c52b72c715d2eab7ddb1d760c9556dc73fd7cca555b7013eb757cd499e18"}
Jan 27 20:22:21 crc kubenswrapper[4793]: I0127 20:22:21.153223 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw"
Jan 27 20:22:21 crc kubenswrapper[4793]: I0127 20:22:21.155421 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-t5lwz" event={"ID":"ff2db85c-6bbf-45c8-b27b-93b2cff54130","Type":"ContainerStarted","Data":"fa6581becd6b8d104452635cca3a1dcf6a76c9e14c4435c24d6bcf6e1247eefc"}
Jan 27 20:22:21 crc kubenswrapper[4793]: I0127 20:22:21.155750 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-t5lwz"
Jan 27 20:22:21 crc kubenswrapper[4793]: I0127 20:22:21.199720 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" event={"ID":"742adff5-75e8-4941-9815-03bc77850cfa","Type":"ContainerStarted","Data":"580b1ea79292b57e668a68ccd0f4b6ba63f9d8023955a7afa6de691ef5248743"}
Jan 27 20:22:21 crc kubenswrapper[4793]: I0127 20:22:21.200399 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2"
Jan 27 20:22:21 crc kubenswrapper[4793]: I0127 20:22:21.218698 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" podStartSLOduration=52.459703076 podStartE2EDuration="1m6.218672097s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:22:06.267771977 +0000 UTC m=+1151.658025133" lastFinishedPulling="2026-01-27 20:22:20.026740998 +0000 UTC m=+1165.416994154" observedRunningTime="2026-01-27 20:22:21.211126351 +0000 UTC m=+1166.601379507" watchObservedRunningTime="2026-01-27 20:22:21.218672097 +0000 UTC m=+1166.608925253"
Jan 27 20:22:21 crc kubenswrapper[4793]: I0127 20:22:21.273372 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-t5lwz" podStartSLOduration=8.349311227 podStartE2EDuration="1m6.273342314s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.108265788 +0000 UTC m=+1107.498518944" lastFinishedPulling="2026-01-27 20:22:20.032296875 +0000 UTC m=+1165.422550031" observedRunningTime="2026-01-27 20:22:21.259975925 +0000 UTC m=+1166.650229081" watchObservedRunningTime="2026-01-27 20:22:21.273342314 +0000 UTC m=+1166.663595470"
Jan 27 20:22:25 crc kubenswrapper[4793]: I0127 20:22:25.794008 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-g4rnc"
Jan 27 20:22:25 crc kubenswrapper[4793]: I0127 20:22:25.812304 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" podStartSLOduration=58.066674588 podStartE2EDuration="1m10.812281861s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:22:07.284798646 +0000 UTC m=+1152.675051802" lastFinishedPulling="2026-01-27 20:22:20.030405919 +0000 UTC m=+1165.420659075" observedRunningTime="2026-01-27 20:22:21.296016852 +0000 UTC m=+1166.686270008" watchObservedRunningTime="2026-01-27 20:22:25.812281861 +0000 UTC m=+1171.202535017"
Jan 27 20:22:26 crc kubenswrapper[4793]: I0127 20:22:26.226659 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-t5lwz"
Jan 27 20:22:26 crc kubenswrapper[4793]: I0127 20:22:26.836506 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-gh2bm"
Jan 27 20:22:27 crc kubenswrapper[4793]: I0127 20:22:27.340034 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv" event={"ID":"71401df0-2118-4561-9d06-e311458f5357","Type":"ContainerStarted","Data":"6d8fe258edd0b729a9dde689d704480764227c3ba8c208fa303bd83cae73fc9e"}
Jan 27 20:22:27 crc kubenswrapper[4793]: I0127 20:22:27.340249 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv"
Jan 27 20:22:27 crc kubenswrapper[4793]: I0127 20:22:27.368494 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv" podStartSLOduration=8.279330469 podStartE2EDuration="1m12.368473796s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.21061312 +0000 UTC m=+1107.600866276" lastFinishedPulling="2026-01-27 20:22:26.299756447 +0000 UTC m=+1171.690009603" observedRunningTime="2026-01-27 20:22:27.355153818 +0000 UTC m=+1172.745407014" watchObservedRunningTime="2026-01-27 20:22:27.368473796 +0000 UTC m=+1172.758726942"
podStartE2EDuration="1m12.368473796s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.21061312 +0000 UTC m=+1107.600866276" lastFinishedPulling="2026-01-27 20:22:26.299756447 +0000 UTC m=+1171.690009603" observedRunningTime="2026-01-27 20:22:27.355153818 +0000 UTC m=+1172.745407014" watchObservedRunningTime="2026-01-27 20:22:27.368473796 +0000 UTC m=+1172.758726942" Jan 27 20:22:30 crc kubenswrapper[4793]: I0127 20:22:30.431309 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-khvmz" event={"ID":"b0693604-a724-43ba-92b9-d4a52ca4cf85","Type":"ContainerStarted","Data":"0dd7cea7fa642860055357b0d30758f5e73e566694fb03bd63b1cb48171fba0b"} Jan 27 20:22:30 crc kubenswrapper[4793]: I0127 20:22:30.450730 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-khvmz" podStartSLOduration=6.231235258 podStartE2EDuration="1m13.450711264s" podCreationTimestamp="2026-01-27 20:21:17 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.235890223 +0000 UTC m=+1107.626143379" lastFinishedPulling="2026-01-27 20:22:29.455366229 +0000 UTC m=+1174.845619385" observedRunningTime="2026-01-27 20:22:30.445843644 +0000 UTC m=+1175.836096800" watchObservedRunningTime="2026-01-27 20:22:30.450711264 +0000 UTC m=+1175.840964420" Jan 27 20:22:31 crc kubenswrapper[4793]: I0127 20:22:31.439477 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z" event={"ID":"a227b0fb-079f-4d81-9f3e-380202824892","Type":"ContainerStarted","Data":"caf835d358c0533ad2bea5c838d48919d80f3c6e3bba9832122b83f2277c2ce0"} Jan 27 20:22:31 crc kubenswrapper[4793]: I0127 20:22:31.440020 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z" Jan 27 20:22:31 crc kubenswrapper[4793]: I0127 20:22:31.462749 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z" podStartSLOduration=8.362147181 podStartE2EDuration="1m16.462725399s" podCreationTimestamp="2026-01-27 20:21:15 +0000 UTC" firstStartedPulling="2026-01-27 20:21:22.180772095 +0000 UTC m=+1107.571025251" lastFinishedPulling="2026-01-27 20:22:30.281350313 +0000 UTC m=+1175.671603469" observedRunningTime="2026-01-27 20:22:31.459381646 +0000 UTC m=+1176.849634802" watchObservedRunningTime="2026-01-27 20:22:31.462725399 +0000 UTC m=+1176.852978555" Jan 27 20:22:31 crc kubenswrapper[4793]: I0127 20:22:31.718604 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-752f2" Jan 27 20:22:32 crc kubenswrapper[4793]: I0127 20:22:32.062105 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85428jbw" Jan 27 20:22:36 crc kubenswrapper[4793]: I0127 20:22:36.115091 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-mfx2z" Jan 27 20:22:36 crc kubenswrapper[4793]: I0127 20:22:36.817806 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-tqmbv" Jan 27 20:22:52 crc 
kubenswrapper[4793]: I0127 20:22:52.753534 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:22:52 crc kubenswrapper[4793]: I0127 20:22:52.754699 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:22:56 crc kubenswrapper[4793]: I0127 20:22:56.950624 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56765d9fd9-65zch"] Jan 27 20:22:56 crc kubenswrapper[4793]: I0127 20:22:56.952374 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56765d9fd9-65zch" Jan 27 20:22:56 crc kubenswrapper[4793]: I0127 20:22:56.954839 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 27 20:22:56 crc kubenswrapper[4793]: I0127 20:22:56.956947 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 27 20:22:56 crc kubenswrapper[4793]: I0127 20:22:56.957039 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 27 20:22:56 crc kubenswrapper[4793]: I0127 20:22:56.957968 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-cjb2z" Jan 27 20:22:56 crc kubenswrapper[4793]: I0127 20:22:56.969282 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56765d9fd9-65zch"] Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.130462 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djrr4\" (UniqueName: \"kubernetes.io/projected/9c46d06f-cd04-4ccb-9fff-64af1fd94f9c-kube-api-access-djrr4\") pod \"dnsmasq-dns-56765d9fd9-65zch\" (UID: \"9c46d06f-cd04-4ccb-9fff-64af1fd94f9c\") " pod="openstack/dnsmasq-dns-56765d9fd9-65zch" Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.130508 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c46d06f-cd04-4ccb-9fff-64af1fd94f9c-config\") pod \"dnsmasq-dns-56765d9fd9-65zch\" (UID: \"9c46d06f-cd04-4ccb-9fff-64af1fd94f9c\") " pod="openstack/dnsmasq-dns-56765d9fd9-65zch" Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.154667 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6cb4b66457-bwhxx"] Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.155818 4793 util.go:30] "No sandbox for pod can be found. 
Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.164038 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.174230 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cb4b66457-bwhxx"]
Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.231917 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djrr4\" (UniqueName: \"kubernetes.io/projected/9c46d06f-cd04-4ccb-9fff-64af1fd94f9c-kube-api-access-djrr4\") pod \"dnsmasq-dns-56765d9fd9-65zch\" (UID: \"9c46d06f-cd04-4ccb-9fff-64af1fd94f9c\") " pod="openstack/dnsmasq-dns-56765d9fd9-65zch"
Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.231963 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c46d06f-cd04-4ccb-9fff-64af1fd94f9c-config\") pod \"dnsmasq-dns-56765d9fd9-65zch\" (UID: \"9c46d06f-cd04-4ccb-9fff-64af1fd94f9c\") " pod="openstack/dnsmasq-dns-56765d9fd9-65zch"
Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.233631 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c46d06f-cd04-4ccb-9fff-64af1fd94f9c-config\") pod \"dnsmasq-dns-56765d9fd9-65zch\" (UID: \"9c46d06f-cd04-4ccb-9fff-64af1fd94f9c\") " pod="openstack/dnsmasq-dns-56765d9fd9-65zch"
Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.253467 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djrr4\" (UniqueName: \"kubernetes.io/projected/9c46d06f-cd04-4ccb-9fff-64af1fd94f9c-kube-api-access-djrr4\") pod \"dnsmasq-dns-56765d9fd9-65zch\" (UID: \"9c46d06f-cd04-4ccb-9fff-64af1fd94f9c\") " pod="openstack/dnsmasq-dns-56765d9fd9-65zch"
Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.273177 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56765d9fd9-65zch"
Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.334436 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39b624e5-100f-4b63-9afb-e84133bcfaa6-config\") pod \"dnsmasq-dns-6cb4b66457-bwhxx\" (UID: \"39b624e5-100f-4b63-9afb-e84133bcfaa6\") " pod="openstack/dnsmasq-dns-6cb4b66457-bwhxx"
Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.334820 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82mxw\" (UniqueName: \"kubernetes.io/projected/39b624e5-100f-4b63-9afb-e84133bcfaa6-kube-api-access-82mxw\") pod \"dnsmasq-dns-6cb4b66457-bwhxx\" (UID: \"39b624e5-100f-4b63-9afb-e84133bcfaa6\") " pod="openstack/dnsmasq-dns-6cb4b66457-bwhxx"
Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.334984 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/39b624e5-100f-4b63-9afb-e84133bcfaa6-dns-svc\") pod \"dnsmasq-dns-6cb4b66457-bwhxx\" (UID: \"39b624e5-100f-4b63-9afb-e84133bcfaa6\") " pod="openstack/dnsmasq-dns-6cb4b66457-bwhxx"
Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.436296 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39b624e5-100f-4b63-9afb-e84133bcfaa6-config\") pod \"dnsmasq-dns-6cb4b66457-bwhxx\" (UID: \"39b624e5-100f-4b63-9afb-e84133bcfaa6\") " pod="openstack/dnsmasq-dns-6cb4b66457-bwhxx"
Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.436623 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82mxw\" (UniqueName: \"kubernetes.io/projected/39b624e5-100f-4b63-9afb-e84133bcfaa6-kube-api-access-82mxw\") pod \"dnsmasq-dns-6cb4b66457-bwhxx\" (UID: \"39b624e5-100f-4b63-9afb-e84133bcfaa6\") " pod="openstack/dnsmasq-dns-6cb4b66457-bwhxx"
Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.438167 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/39b624e5-100f-4b63-9afb-e84133bcfaa6-dns-svc\") pod \"dnsmasq-dns-6cb4b66457-bwhxx\" (UID: \"39b624e5-100f-4b63-9afb-e84133bcfaa6\") " pod="openstack/dnsmasq-dns-6cb4b66457-bwhxx"
Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.439105 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/39b624e5-100f-4b63-9afb-e84133bcfaa6-dns-svc\") pod \"dnsmasq-dns-6cb4b66457-bwhxx\" (UID: \"39b624e5-100f-4b63-9afb-e84133bcfaa6\") " pod="openstack/dnsmasq-dns-6cb4b66457-bwhxx"
Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.439262 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39b624e5-100f-4b63-9afb-e84133bcfaa6-config\") pod \"dnsmasq-dns-6cb4b66457-bwhxx\" (UID: \"39b624e5-100f-4b63-9afb-e84133bcfaa6\") " pod="openstack/dnsmasq-dns-6cb4b66457-bwhxx"
Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.456768 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82mxw\" (UniqueName: \"kubernetes.io/projected/39b624e5-100f-4b63-9afb-e84133bcfaa6-kube-api-access-82mxw\") pod \"dnsmasq-dns-6cb4b66457-bwhxx\" (UID: \"39b624e5-100f-4b63-9afb-e84133bcfaa6\") " pod="openstack/dnsmasq-dns-6cb4b66457-bwhxx"
pod="openstack/dnsmasq-dns-6cb4b66457-bwhxx" Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.487530 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cb4b66457-bwhxx" Jan 27 20:22:57 crc kubenswrapper[4793]: I0127 20:22:57.711169 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56765d9fd9-65zch"] Jan 27 20:22:58 crc kubenswrapper[4793]: I0127 20:22:58.235476 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cb4b66457-bwhxx"] Jan 27 20:22:58 crc kubenswrapper[4793]: W0127 20:22:58.242438 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39b624e5_100f_4b63_9afb_e84133bcfaa6.slice/crio-407098aef276ec72c9bb417a9316e709d714b7e43b2dc28cdf0ab4e15834f529 WatchSource:0}: Error finding container 407098aef276ec72c9bb417a9316e709d714b7e43b2dc28cdf0ab4e15834f529: Status 404 returned error can't find the container with id 407098aef276ec72c9bb417a9316e709d714b7e43b2dc28cdf0ab4e15834f529 Jan 27 20:22:58 crc kubenswrapper[4793]: I0127 20:22:58.665603 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cb4b66457-bwhxx" event={"ID":"39b624e5-100f-4b63-9afb-e84133bcfaa6","Type":"ContainerStarted","Data":"407098aef276ec72c9bb417a9316e709d714b7e43b2dc28cdf0ab4e15834f529"} Jan 27 20:22:58 crc kubenswrapper[4793]: I0127 20:22:58.669101 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56765d9fd9-65zch" event={"ID":"9c46d06f-cd04-4ccb-9fff-64af1fd94f9c","Type":"ContainerStarted","Data":"c5258cca7f5dbf7eb7c9f07b04160eacba82539a67e61f0e72f85e17199c3c2c"} Jan 27 20:23:00 crc kubenswrapper[4793]: I0127 20:23:00.733354 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56765d9fd9-65zch"] Jan 27 20:23:00 crc kubenswrapper[4793]: I0127 20:23:00.845785 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c79f67885-95t8g"] Jan 27 20:23:00 crc kubenswrapper[4793]: I0127 20:23:00.847104 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c79f67885-95t8g" Jan 27 20:23:00 crc kubenswrapper[4793]: I0127 20:23:00.853791 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be27386e-67ce-4b3d-8db8-77df553b6b9c-config\") pod \"dnsmasq-dns-7c79f67885-95t8g\" (UID: \"be27386e-67ce-4b3d-8db8-77df553b6b9c\") " pod="openstack/dnsmasq-dns-7c79f67885-95t8g" Jan 27 20:23:00 crc kubenswrapper[4793]: I0127 20:23:00.853879 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ts2dr\" (UniqueName: \"kubernetes.io/projected/be27386e-67ce-4b3d-8db8-77df553b6b9c-kube-api-access-ts2dr\") pod \"dnsmasq-dns-7c79f67885-95t8g\" (UID: \"be27386e-67ce-4b3d-8db8-77df553b6b9c\") " pod="openstack/dnsmasq-dns-7c79f67885-95t8g" Jan 27 20:23:00 crc kubenswrapper[4793]: I0127 20:23:00.853994 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be27386e-67ce-4b3d-8db8-77df553b6b9c-dns-svc\") pod \"dnsmasq-dns-7c79f67885-95t8g\" (UID: \"be27386e-67ce-4b3d-8db8-77df553b6b9c\") " pod="openstack/dnsmasq-dns-7c79f67885-95t8g" Jan 27 20:23:00 crc kubenswrapper[4793]: I0127 20:23:00.862366 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c79f67885-95t8g"] Jan 27 20:23:00 crc kubenswrapper[4793]: I0127 20:23:00.955693 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be27386e-67ce-4b3d-8db8-77df553b6b9c-dns-svc\") pod \"dnsmasq-dns-7c79f67885-95t8g\" (UID: \"be27386e-67ce-4b3d-8db8-77df553b6b9c\") " pod="openstack/dnsmasq-dns-7c79f67885-95t8g" Jan 27 20:23:00 crc kubenswrapper[4793]: I0127 20:23:00.955766 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be27386e-67ce-4b3d-8db8-77df553b6b9c-config\") pod \"dnsmasq-dns-7c79f67885-95t8g\" (UID: \"be27386e-67ce-4b3d-8db8-77df553b6b9c\") " pod="openstack/dnsmasq-dns-7c79f67885-95t8g" Jan 27 20:23:00 crc kubenswrapper[4793]: I0127 20:23:00.955838 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ts2dr\" (UniqueName: \"kubernetes.io/projected/be27386e-67ce-4b3d-8db8-77df553b6b9c-kube-api-access-ts2dr\") pod \"dnsmasq-dns-7c79f67885-95t8g\" (UID: \"be27386e-67ce-4b3d-8db8-77df553b6b9c\") " pod="openstack/dnsmasq-dns-7c79f67885-95t8g" Jan 27 20:23:00 crc kubenswrapper[4793]: I0127 20:23:00.957256 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be27386e-67ce-4b3d-8db8-77df553b6b9c-dns-svc\") pod \"dnsmasq-dns-7c79f67885-95t8g\" (UID: \"be27386e-67ce-4b3d-8db8-77df553b6b9c\") " pod="openstack/dnsmasq-dns-7c79f67885-95t8g" Jan 27 20:23:00 crc kubenswrapper[4793]: I0127 20:23:00.961470 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be27386e-67ce-4b3d-8db8-77df553b6b9c-config\") pod \"dnsmasq-dns-7c79f67885-95t8g\" (UID: \"be27386e-67ce-4b3d-8db8-77df553b6b9c\") " pod="openstack/dnsmasq-dns-7c79f67885-95t8g" Jan 27 20:23:00 crc kubenswrapper[4793]: I0127 20:23:00.994820 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ts2dr\" (UniqueName: 
\"kubernetes.io/projected/be27386e-67ce-4b3d-8db8-77df553b6b9c-kube-api-access-ts2dr\") pod \"dnsmasq-dns-7c79f67885-95t8g\" (UID: \"be27386e-67ce-4b3d-8db8-77df553b6b9c\") " pod="openstack/dnsmasq-dns-7c79f67885-95t8g" Jan 27 20:23:01 crc kubenswrapper[4793]: I0127 20:23:01.174268 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c79f67885-95t8g" Jan 27 20:23:02 crc kubenswrapper[4793]: I0127 20:23:02.306319 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cb4b66457-bwhxx"] Jan 27 20:23:02 crc kubenswrapper[4793]: I0127 20:23:02.344150 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7899f7d95c-t6js7"] Jan 27 20:23:02 crc kubenswrapper[4793]: I0127 20:23:02.349202 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7899f7d95c-t6js7" Jan 27 20:23:02 crc kubenswrapper[4793]: I0127 20:23:02.356412 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7899f7d95c-t6js7"] Jan 27 20:23:02 crc kubenswrapper[4793]: I0127 20:23:02.464181 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9070a599-5105-4e4b-85df-dde3b1885bcb-dns-svc\") pod \"dnsmasq-dns-7899f7d95c-t6js7\" (UID: \"9070a599-5105-4e4b-85df-dde3b1885bcb\") " pod="openstack/dnsmasq-dns-7899f7d95c-t6js7" Jan 27 20:23:02 crc kubenswrapper[4793]: I0127 20:23:02.464233 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9070a599-5105-4e4b-85df-dde3b1885bcb-config\") pod \"dnsmasq-dns-7899f7d95c-t6js7\" (UID: \"9070a599-5105-4e4b-85df-dde3b1885bcb\") " pod="openstack/dnsmasq-dns-7899f7d95c-t6js7" Jan 27 20:23:02 crc kubenswrapper[4793]: I0127 20:23:02.465108 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64jbz\" (UniqueName: \"kubernetes.io/projected/9070a599-5105-4e4b-85df-dde3b1885bcb-kube-api-access-64jbz\") pod \"dnsmasq-dns-7899f7d95c-t6js7\" (UID: \"9070a599-5105-4e4b-85df-dde3b1885bcb\") " pod="openstack/dnsmasq-dns-7899f7d95c-t6js7" Jan 27 20:23:02 crc kubenswrapper[4793]: I0127 20:23:02.542737 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c79f67885-95t8g"] Jan 27 20:23:02 crc kubenswrapper[4793]: W0127 20:23:02.547598 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe27386e_67ce_4b3d_8db8_77df553b6b9c.slice/crio-ae8802252c6cbc67fc2577492cc2ce47eb274979a83595172429f4c5ba20809b WatchSource:0}: Error finding container ae8802252c6cbc67fc2577492cc2ce47eb274979a83595172429f4c5ba20809b: Status 404 returned error can't find the container with id ae8802252c6cbc67fc2577492cc2ce47eb274979a83595172429f4c5ba20809b Jan 27 20:23:02 crc kubenswrapper[4793]: I0127 20:23:02.566336 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64jbz\" (UniqueName: \"kubernetes.io/projected/9070a599-5105-4e4b-85df-dde3b1885bcb-kube-api-access-64jbz\") pod \"dnsmasq-dns-7899f7d95c-t6js7\" (UID: \"9070a599-5105-4e4b-85df-dde3b1885bcb\") " pod="openstack/dnsmasq-dns-7899f7d95c-t6js7" Jan 27 20:23:02 crc kubenswrapper[4793]: I0127 20:23:02.566398 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" 
(UniqueName: \"kubernetes.io/configmap/9070a599-5105-4e4b-85df-dde3b1885bcb-dns-svc\") pod \"dnsmasq-dns-7899f7d95c-t6js7\" (UID: \"9070a599-5105-4e4b-85df-dde3b1885bcb\") " pod="openstack/dnsmasq-dns-7899f7d95c-t6js7" Jan 27 20:23:02 crc kubenswrapper[4793]: I0127 20:23:02.566423 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9070a599-5105-4e4b-85df-dde3b1885bcb-config\") pod \"dnsmasq-dns-7899f7d95c-t6js7\" (UID: \"9070a599-5105-4e4b-85df-dde3b1885bcb\") " pod="openstack/dnsmasq-dns-7899f7d95c-t6js7" Jan 27 20:23:02 crc kubenswrapper[4793]: I0127 20:23:02.567924 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9070a599-5105-4e4b-85df-dde3b1885bcb-config\") pod \"dnsmasq-dns-7899f7d95c-t6js7\" (UID: \"9070a599-5105-4e4b-85df-dde3b1885bcb\") " pod="openstack/dnsmasq-dns-7899f7d95c-t6js7" Jan 27 20:23:02 crc kubenswrapper[4793]: I0127 20:23:02.568003 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9070a599-5105-4e4b-85df-dde3b1885bcb-dns-svc\") pod \"dnsmasq-dns-7899f7d95c-t6js7\" (UID: \"9070a599-5105-4e4b-85df-dde3b1885bcb\") " pod="openstack/dnsmasq-dns-7899f7d95c-t6js7" Jan 27 20:23:02 crc kubenswrapper[4793]: I0127 20:23:02.601371 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64jbz\" (UniqueName: \"kubernetes.io/projected/9070a599-5105-4e4b-85df-dde3b1885bcb-kube-api-access-64jbz\") pod \"dnsmasq-dns-7899f7d95c-t6js7\" (UID: \"9070a599-5105-4e4b-85df-dde3b1885bcb\") " pod="openstack/dnsmasq-dns-7899f7d95c-t6js7" Jan 27 20:23:02 crc kubenswrapper[4793]: I0127 20:23:02.746508 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7899f7d95c-t6js7" Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.011114 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c79f67885-95t8g" event={"ID":"be27386e-67ce-4b3d-8db8-77df553b6b9c","Type":"ContainerStarted","Data":"ae8802252c6cbc67fc2577492cc2ce47eb274979a83595172429f4c5ba20809b"} Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.074660 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.076785 4793 util.go:30] "No sandbox for pod can be found. 
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.079814 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.084715 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.085163 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.085360 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.085515 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.085755 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.087511 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-fp72z"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.097363 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.176992 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.177063 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.177107 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.177140 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.177452 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.177566 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4s6f\" (UniqueName: \"kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-kube-api-access-q4s6f\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.177598 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-server-conf\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.177630 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-config-data\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.177670 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-pod-info\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.177751 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.178004 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.281026 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4s6f\" (UniqueName: \"kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-kube-api-access-q4s6f\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.281175 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-server-conf\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.281276 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-config-data\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.281382 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-pod-info\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0"
pod="openstack/rabbitmq-server-0" Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.281459 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.281532 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.281651 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.281709 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.281773 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.281834 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.282005 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.284782 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-config-data\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.292346 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-server-conf\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:03 crc kubenswrapper[4793]: I0127 20:23:03.295050 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-plugins-conf\") pod 
\"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:04 crc kubenswrapper[4793]: I0127 20:23:03.967640 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Jan 27 20:23:04 crc kubenswrapper[4793]: I0127 20:23:03.973112 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-pod-info\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:04 crc kubenswrapper[4793]: I0127 20:23:03.973995 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:04 crc kubenswrapper[4793]: I0127 20:23:03.980203 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:04 crc kubenswrapper[4793]: I0127 20:23:03.980955 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:04 crc kubenswrapper[4793]: I0127 20:23:03.989677 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4s6f\" (UniqueName: \"kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-kube-api-access-q4s6f\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:04 crc kubenswrapper[4793]: I0127 20:23:03.997754 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:04 crc kubenswrapper[4793]: I0127 20:23:04.009908 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:04 crc kubenswrapper[4793]: I0127 20:23:04.114083 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") " pod="openstack/rabbitmq-server-0" Jan 27 20:23:04 crc kubenswrapper[4793]: I0127 20:23:04.577072 4793 util.go:30] "No sandbox for pod can be found. 
Jan 27 20:23:04 crc kubenswrapper[4793]: I0127 20:23:04.692810 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7899f7d95c-t6js7"]
Jan 27 20:23:05 crc kubenswrapper[4793]: I0127 20:23:05.399031 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7899f7d95c-t6js7" event={"ID":"9070a599-5105-4e4b-85df-dde3b1885bcb","Type":"ContainerStarted","Data":"aa075f0e31d487a4191f4ac6a90776d9833b9e2d29f4689a68402ee04e9efcd9"}
Jan 27 20:23:05 crc kubenswrapper[4793]: I0127 20:23:05.940433 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"]
Jan 27 20:23:05 crc kubenswrapper[4793]: I0127 20:23:05.943340 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 27 20:23:05 crc kubenswrapper[4793]: I0127 20:23:05.943513 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Jan 27 20:23:05 crc kubenswrapper[4793]: I0127 20:23:05.945518 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Jan 27 20:23:05 crc kubenswrapper[4793]: I0127 20:23:05.945629 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:23:05 crc kubenswrapper[4793]: I0127 20:23:05.951360 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.025807 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.026045 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.026097 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.026454 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.026719 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-6kfxq"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.026855 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.026976 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.031300 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.041075 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-kg5s2"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.041544 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.042051 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.042277 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.077937 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.077987 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078018 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078037 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078057 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078090 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078114 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/be1afc99-1852-4e3b-a2e7-e9beab138334-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078146 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0"
Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078165 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0"
pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078189 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-kolla-config\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078208 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-config-data-generated\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078226 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078248 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjjfq\" (UniqueName: \"kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-kube-api-access-fjjfq\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078266 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078286 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-operator-scripts\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078304 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9vhn\" (UniqueName: \"kubernetes.io/projected/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-kube-api-access-s9vhn\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078321 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-config-data-default\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078342 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/be1afc99-1852-4e3b-a2e7-e9beab138334-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.078391 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.133487 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c79f67885-95t8g"] Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.590607 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.590663 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/be1afc99-1852-4e3b-a2e7-e9beab138334-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.590705 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.590725 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.590752 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-kolla-config\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.590771 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-config-data-generated\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.590797 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.590829 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.590846 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjjfq\" (UniqueName: \"kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-kube-api-access-fjjfq\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.590871 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-operator-scripts\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.590895 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9vhn\" (UniqueName: \"kubernetes.io/projected/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-kube-api-access-s9vhn\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.590917 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-config-data-default\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.590946 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/be1afc99-1852-4e3b-a2e7-e9beab138334-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.590988 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.591021 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.591047 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.591081 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.591110 4793 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.591137 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.591563 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-config-data-generated\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.599854 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.602906 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.603608 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/be1afc99-1852-4e3b-a2e7-e9beab138334-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.603815 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.606264 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/be1afc99-1852-4e3b-a2e7-e9beab138334-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.606999 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.607523 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-kolla-config\") pod 
\"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.613831 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.616727 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.617992 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.621765 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-config-data-default\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.622930 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.623124 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.633438 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.638177 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.643764 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.644737 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.645022 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9vhn\" (UniqueName: \"kubernetes.io/projected/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-kube-api-access-s9vhn\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.670684 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974-operator-scripts\") pod \"openstack-galera-0\" (UID: \"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974\") " pod="openstack/openstack-galera-0" Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.677439 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77dcc586cc-dt95k"] Jan 27 20:23:06 crc kubenswrapper[4793]: I0127 20:23:06.678299 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjjfq\" (UniqueName: \"kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-kube-api-access-fjjfq\") pod \"rabbitmq-cell1-server-0\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:06.686166 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77dcc586cc-dt95k" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:06.694879 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-config\") pod \"dnsmasq-dns-77dcc586cc-dt95k\" (UID: \"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc\") " pod="openstack/dnsmasq-dns-77dcc586cc-dt95k" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:06.694964 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-dns-svc\") pod \"dnsmasq-dns-77dcc586cc-dt95k\" (UID: \"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc\") " pod="openstack/dnsmasq-dns-77dcc586cc-dt95k" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:06.695025 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47vfj\" (UniqueName: \"kubernetes.io/projected/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-kube-api-access-47vfj\") pod \"dnsmasq-dns-77dcc586cc-dt95k\" (UID: \"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc\") " pod="openstack/dnsmasq-dns-77dcc586cc-dt95k" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:06.706156 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77dcc586cc-dt95k"] Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:06.800263 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47vfj\" (UniqueName: \"kubernetes.io/projected/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-kube-api-access-47vfj\") pod \"dnsmasq-dns-77dcc586cc-dt95k\" (UID: \"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc\") " pod="openstack/dnsmasq-dns-77dcc586cc-dt95k" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:06.800809 4793 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-config\") pod \"dnsmasq-dns-77dcc586cc-dt95k\" (UID: \"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc\") " pod="openstack/dnsmasq-dns-77dcc586cc-dt95k" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:06.800894 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-dns-svc\") pod \"dnsmasq-dns-77dcc586cc-dt95k\" (UID: \"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc\") " pod="openstack/dnsmasq-dns-77dcc586cc-dt95k" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.023974 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-dns-svc\") pod \"dnsmasq-dns-77dcc586cc-dt95k\" (UID: \"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc\") " pod="openstack/dnsmasq-dns-77dcc586cc-dt95k" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.024719 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.026293 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.041291 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-config\") pod \"dnsmasq-dns-77dcc586cc-dt95k\" (UID: \"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc\") " pod="openstack/dnsmasq-dns-77dcc586cc-dt95k" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.095511 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.139993 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47vfj\" (UniqueName: \"kubernetes.io/projected/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-kube-api-access-47vfj\") pod \"dnsmasq-dns-77dcc586cc-dt95k\" (UID: \"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc\") " pod="openstack/dnsmasq-dns-77dcc586cc-dt95k" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.347223 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.348401 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.389149 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.389498 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-4s6d5" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.389780 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.390608 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77dcc586cc-dt95k" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.488652 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.517070 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da5aef81-265e-457b-bd86-b770db112298-config-data\") pod \"memcached-0\" (UID: \"da5aef81-265e-457b-bd86-b770db112298\") " pod="openstack/memcached-0" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.517752 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da5aef81-265e-457b-bd86-b770db112298-combined-ca-bundle\") pod \"memcached-0\" (UID: \"da5aef81-265e-457b-bd86-b770db112298\") " pod="openstack/memcached-0" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.517791 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/da5aef81-265e-457b-bd86-b770db112298-memcached-tls-certs\") pod \"memcached-0\" (UID: \"da5aef81-265e-457b-bd86-b770db112298\") " pod="openstack/memcached-0" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.517844 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/da5aef81-265e-457b-bd86-b770db112298-kolla-config\") pod \"memcached-0\" (UID: \"da5aef81-265e-457b-bd86-b770db112298\") " pod="openstack/memcached-0" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.518218 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-st72n\" (UniqueName: \"kubernetes.io/projected/da5aef81-265e-457b-bd86-b770db112298-kube-api-access-st72n\") pod \"memcached-0\" (UID: \"da5aef81-265e-457b-bd86-b770db112298\") " pod="openstack/memcached-0" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.878906 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da5aef81-265e-457b-bd86-b770db112298-config-data\") pod \"memcached-0\" (UID: \"da5aef81-265e-457b-bd86-b770db112298\") " pod="openstack/memcached-0" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.878975 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da5aef81-265e-457b-bd86-b770db112298-combined-ca-bundle\") pod \"memcached-0\" (UID: \"da5aef81-265e-457b-bd86-b770db112298\") " pod="openstack/memcached-0" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.879005 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/da5aef81-265e-457b-bd86-b770db112298-memcached-tls-certs\") pod \"memcached-0\" (UID: \"da5aef81-265e-457b-bd86-b770db112298\") " pod="openstack/memcached-0" Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.879051 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/da5aef81-265e-457b-bd86-b770db112298-kolla-config\") pod \"memcached-0\" (UID: \"da5aef81-265e-457b-bd86-b770db112298\") " pod="openstack/memcached-0" Jan 27 20:23:07 crc 
Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.879124 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-st72n\" (UniqueName: \"kubernetes.io/projected/da5aef81-265e-457b-bd86-b770db112298-kube-api-access-st72n\") pod \"memcached-0\" (UID: \"da5aef81-265e-457b-bd86-b770db112298\") " pod="openstack/memcached-0"
Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.880223 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da5aef81-265e-457b-bd86-b770db112298-config-data\") pod \"memcached-0\" (UID: \"da5aef81-265e-457b-bd86-b770db112298\") " pod="openstack/memcached-0"
Jan 27 20:23:07 crc kubenswrapper[4793]: I0127 20:23:07.944076 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7","Type":"ContainerStarted","Data":"fb17f389b32917d7d96e65262b1065b09823ad457d4d8a304db978269f914b3e"}
Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.009915 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-st72n\" (UniqueName: \"kubernetes.io/projected/da5aef81-265e-457b-bd86-b770db112298-kube-api-access-st72n\") pod \"memcached-0\" (UID: \"da5aef81-265e-457b-bd86-b770db112298\") " pod="openstack/memcached-0"
Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.010955 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/da5aef81-265e-457b-bd86-b770db112298-kolla-config\") pod \"memcached-0\" (UID: \"da5aef81-265e-457b-bd86-b770db112298\") " pod="openstack/memcached-0"
Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.021730 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da5aef81-265e-457b-bd86-b770db112298-combined-ca-bundle\") pod \"memcached-0\" (UID: \"da5aef81-265e-457b-bd86-b770db112298\") " pod="openstack/memcached-0"
Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.023071 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/da5aef81-265e-457b-bd86-b770db112298-memcached-tls-certs\") pod \"memcached-0\" (UID: \"da5aef81-265e-457b-bd86-b770db112298\") " pod="openstack/memcached-0"
Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.039930 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.143485 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"]
Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.162641 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.162829 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
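PLEG lines like the rabbitmq-server-0 event above carry a small JSON payload: the pod UID, the lifecycle event type, and the container or sandbox ID involved. A hedged decoding sketch (the struct name is mine; the JSON keys are as printed in the log):

package main

import (
	"encoding/json"
	"fmt"
	"regexp"
)

// Hypothetical decoder for the event={...} payload in "SyncLoop (PLEG)" lines.
type plegEvent struct {
	ID   string `json:"ID"`   // pod UID
	Type string `json:"Type"` // e.g. ContainerStarted
	Data string `json:"Data"` // container or sandbox ID
}

var eventRe = regexp.MustCompile(`event=(\{.*\})`)

func main() {
	line := `"SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7","Type":"ContainerStarted","Data":"fb17f389b32917d7d96e65262b1065b09823ad457d4d8a304db978269f914b3e"}`
	m := eventRe.FindStringSubmatch(line)
	if m == nil {
		return
	}
	var ev plegEvent
	if err := json.Unmarshal([]byte(m[1]), &ev); err != nil {
		panic(err)
	}
	fmt.Printf("pod UID %s: %s (id %s)\n", ev.ID, ev.Type, ev.Data)
}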
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.171957 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.172841 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.173105 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.173266 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-rhxpm" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.218121 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd405c61-4515-4524-8b4c-c30fcc225b3b-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.218229 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cd405c61-4515-4524-8b4c-c30fcc225b3b-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.218267 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd405c61-4515-4524-8b4c-c30fcc225b3b-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.218312 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.218351 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd405c61-4515-4524-8b4c-c30fcc225b3b-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.218442 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cd405c61-4515-4524-8b4c-c30fcc225b3b-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.218625 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xd7l\" (UniqueName: \"kubernetes.io/projected/cd405c61-4515-4524-8b4c-c30fcc225b3b-kube-api-access-5xd7l\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " 
pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.218702 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cd405c61-4515-4524-8b4c-c30fcc225b3b-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.330898 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd405c61-4515-4524-8b4c-c30fcc225b3b-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.331375 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cd405c61-4515-4524-8b4c-c30fcc225b3b-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.331497 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xd7l\" (UniqueName: \"kubernetes.io/projected/cd405c61-4515-4524-8b4c-c30fcc225b3b-kube-api-access-5xd7l\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.331563 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cd405c61-4515-4524-8b4c-c30fcc225b3b-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.331604 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd405c61-4515-4524-8b4c-c30fcc225b3b-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.331644 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cd405c61-4515-4524-8b4c-c30fcc225b3b-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.331674 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd405c61-4515-4524-8b4c-c30fcc225b3b-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.792957 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cd405c61-4515-4524-8b4c-c30fcc225b3b-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc 
Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.793912 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cd405c61-4515-4524-8b4c-c30fcc225b3b-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.811410 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd405c61-4515-4524-8b4c-c30fcc225b3b-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.822142 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd405c61-4515-4524-8b4c-c30fcc225b3b-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.822710 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cd405c61-4515-4524-8b4c-c30fcc225b3b-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.824417 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xd7l\" (UniqueName: \"kubernetes.io/projected/cd405c61-4515-4524-8b4c-c30fcc225b3b-kube-api-access-5xd7l\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.845689 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-notifications-server-0"]
Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.847178 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-notifications-server-0"
Need to start a new one" pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.847354 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd405c61-4515-4524-8b4c-c30fcc225b3b-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.851817 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.852222 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.859742 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-server-conf" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.860402 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-erlang-cookie" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.860671 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-server-dockercfg-6jkp6" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.860907 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-plugins-conf" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.861085 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-notifications-svc" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.863992 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-default-user" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.864148 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-config-data" Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.913560 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Jan 27 20:23:08 crc kubenswrapper[4793]: I0127 20:23:08.953013 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"cd405c61-4515-4524-8b4c-c30fcc225b3b\") " pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:09 crc kubenswrapper[4793]: I0127 20:23:09.424695 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 27 20:23:09 crc kubenswrapper[4793]: I0127 20:23:09.429918 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d13b401d-8f36-4677-b782-ebf9a3d5daab-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:09 crc kubenswrapper[4793]: I0127 20:23:09.429958 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d13b401d-8f36-4677-b782-ebf9a3d5daab-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:09 crc kubenswrapper[4793]: I0127 20:23:09.429990 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d13b401d-8f36-4677-b782-ebf9a3d5daab-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:09 crc kubenswrapper[4793]: I0127 20:23:09.430005 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d13b401d-8f36-4677-b782-ebf9a3d5daab-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:09 crc kubenswrapper[4793]: I0127 20:23:09.430041 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d13b401d-8f36-4677-b782-ebf9a3d5daab-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:09 crc kubenswrapper[4793]: I0127 20:23:09.430066 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d13b401d-8f36-4677-b782-ebf9a3d5daab-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:09 crc kubenswrapper[4793]: I0127 20:23:09.430082 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8k9jg\" (UniqueName: \"kubernetes.io/projected/d13b401d-8f36-4677-b782-ebf9a3d5daab-kube-api-access-8k9jg\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:09 crc kubenswrapper[4793]: I0127 20:23:09.430096 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:09 crc kubenswrapper[4793]: I0127 20:23:09.430111 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d13b401d-8f36-4677-b782-ebf9a3d5daab-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:09 crc kubenswrapper[4793]: I0127 20:23:09.430151 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d13b401d-8f36-4677-b782-ebf9a3d5daab-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:09 crc kubenswrapper[4793]: I0127 20:23:09.430168 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d13b401d-8f36-4677-b782-ebf9a3d5daab-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.001852 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d13b401d-8f36-4677-b782-ebf9a3d5daab-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.001969 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d13b401d-8f36-4677-b782-ebf9a3d5daab-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.002061 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d13b401d-8f36-4677-b782-ebf9a3d5daab-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.002106 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d13b401d-8f36-4677-b782-ebf9a3d5daab-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.002189 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d13b401d-8f36-4677-b782-ebf9a3d5daab-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.002238 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d13b401d-8f36-4677-b782-ebf9a3d5daab-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.002278 4793 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-8k9jg\" (UniqueName: \"kubernetes.io/projected/d13b401d-8f36-4677-b782-ebf9a3d5daab-kube-api-access-8k9jg\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.002310 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d13b401d-8f36-4677-b782-ebf9a3d5daab-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.002348 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.002402 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d13b401d-8f36-4677-b782-ebf9a3d5daab-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.002449 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d13b401d-8f36-4677-b782-ebf9a3d5daab-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.048175 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d13b401d-8f36-4677-b782-ebf9a3d5daab-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.049756 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.054809 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d13b401d-8f36-4677-b782-ebf9a3d5daab-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.055184 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d13b401d-8f36-4677-b782-ebf9a3d5daab-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.055563 4793 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d13b401d-8f36-4677-b782-ebf9a3d5daab-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.055930 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d13b401d-8f36-4677-b782-ebf9a3d5daab-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.067294 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d13b401d-8f36-4677-b782-ebf9a3d5daab-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.068133 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d13b401d-8f36-4677-b782-ebf9a3d5daab-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.074956 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d13b401d-8f36-4677-b782-ebf9a3d5daab-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.105314 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8k9jg\" (UniqueName: \"kubernetes.io/projected/d13b401d-8f36-4677-b782-ebf9a3d5daab-kube-api-access-8k9jg\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.109325 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d13b401d-8f36-4677-b782-ebf9a3d5daab-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.208449 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.242514 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.255111 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"d13b401d-8f36-4677-b782-ebf9a3d5daab\") " pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.389503 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.580446 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.695327 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77dcc586cc-dt95k"] Jan 27 20:23:10 crc kubenswrapper[4793]: W0127 20:23:10.728818 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod45bc2ffa_4a80_415c_abc3_3aa3d336c4fc.slice/crio-eca29e1d0be096d712e659d061a625f6bcc773f97d9b75021fc42de2414e8443 WatchSource:0}: Error finding container eca29e1d0be096d712e659d061a625f6bcc773f97d9b75021fc42de2414e8443: Status 404 returned error can't find the container with id eca29e1d0be096d712e659d061a625f6bcc773f97d9b75021fc42de2414e8443 Jan 27 20:23:10 crc kubenswrapper[4793]: I0127 20:23:10.736227 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 27 20:23:10 crc kubenswrapper[4793]: W0127 20:23:10.796350 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd405c61_4515_4524_8b4c_c30fcc225b3b.slice/crio-03e440d87eb085cffaa7bdcc29536f191fb89863536e5e9023d2eb1d0ba88cab WatchSource:0}: Error finding container 03e440d87eb085cffaa7bdcc29536f191fb89863536e5e9023d2eb1d0ba88cab: Status 404 returned error can't find the container with id 03e440d87eb085cffaa7bdcc29536f191fb89863536e5e9023d2eb1d0ba88cab Jan 27 20:23:11 crc kubenswrapper[4793]: I0127 20:23:11.173947 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77dcc586cc-dt95k" event={"ID":"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc","Type":"ContainerStarted","Data":"eca29e1d0be096d712e659d061a625f6bcc773f97d9b75021fc42de2414e8443"} Jan 27 20:23:11 crc kubenswrapper[4793]: I0127 20:23:11.177139 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"be1afc99-1852-4e3b-a2e7-e9beab138334","Type":"ContainerStarted","Data":"38b0f4e57647aeced9bf653adf9e87972c42db43af04247addd23e43ac07fe02"} Jan 27 20:23:11 crc kubenswrapper[4793]: I0127 20:23:11.181037 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"da5aef81-265e-457b-bd86-b770db112298","Type":"ContainerStarted","Data":"f8843a2cd0086dd7e699be2ed1fffd8c1dd48ecb891c47fe7446fb9609a11f9f"} Jan 27 20:23:11 crc kubenswrapper[4793]: I0127 20:23:11.208896 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 27 20:23:11 crc kubenswrapper[4793]: I0127 20:23:11.211940 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 27 20:23:11 crc kubenswrapper[4793]: I0127 20:23:11.220838 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-zl2g2" Jan 27 20:23:11 crc kubenswrapper[4793]: I0127 20:23:11.226441 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"cd405c61-4515-4524-8b4c-c30fcc225b3b","Type":"ContainerStarted","Data":"03e440d87eb085cffaa7bdcc29536f191fb89863536e5e9023d2eb1d0ba88cab"} Jan 27 20:23:11 crc kubenswrapper[4793]: I0127 20:23:11.227090 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 27 20:23:11 crc kubenswrapper[4793]: I0127 20:23:11.230083 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974","Type":"ContainerStarted","Data":"f6568bc05e44cdb1c4e3ec120abf77238027ede06f48abd41bc0709ed152a537"} Jan 27 20:23:11 crc kubenswrapper[4793]: I0127 20:23:11.285138 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwk9t\" (UniqueName: \"kubernetes.io/projected/58b757ab-b790-4b71-888f-49d52dc5e80d-kube-api-access-bwk9t\") pod \"kube-state-metrics-0\" (UID: \"58b757ab-b790-4b71-888f-49d52dc5e80d\") " pod="openstack/kube-state-metrics-0" Jan 27 20:23:11 crc kubenswrapper[4793]: I0127 20:23:11.388372 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwk9t\" (UniqueName: \"kubernetes.io/projected/58b757ab-b790-4b71-888f-49d52dc5e80d-kube-api-access-bwk9t\") pod \"kube-state-metrics-0\" (UID: \"58b757ab-b790-4b71-888f-49d52dc5e80d\") " pod="openstack/kube-state-metrics-0" Jan 27 20:23:11 crc kubenswrapper[4793]: I0127 20:23:11.418775 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Jan 27 20:23:11 crc kubenswrapper[4793]: I0127 20:23:11.442957 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwk9t\" (UniqueName: \"kubernetes.io/projected/58b757ab-b790-4b71-888f-49d52dc5e80d-kube-api-access-bwk9t\") pod \"kube-state-metrics-0\" (UID: \"58b757ab-b790-4b71-888f-49d52dc5e80d\") " pod="openstack/kube-state-metrics-0" Jan 27 20:23:11 crc kubenswrapper[4793]: I0127 20:23:11.601288 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 27 20:23:12 crc kubenswrapper[4793]: I0127 20:23:12.564176 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"d13b401d-8f36-4677-b782-ebf9a3d5daab","Type":"ContainerStarted","Data":"061cbd302cb9ea2bf4b4fb37c49d383e062b481b69ae49360cec0fa8e1da862f"} Jan 27 20:23:12 crc kubenswrapper[4793]: I0127 20:23:12.937068 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-fxmkt"] Jan 27 20:23:12 crc kubenswrapper[4793]: I0127 20:23:12.938644 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:12 crc kubenswrapper[4793]: I0127 20:23:12.950357 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 27 20:23:12 crc kubenswrapper[4793]: I0127 20:23:12.959382 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 27 20:23:12 crc kubenswrapper[4793]: I0127 20:23:12.959597 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-z7tsp" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.032976 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/590c61a6-8355-4f4f-be2b-4680745b4732-var-run\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.033039 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/590c61a6-8355-4f4f-be2b-4680745b4732-var-log-ovn\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.033073 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/590c61a6-8355-4f4f-be2b-4680745b4732-scripts\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.033116 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qc9n2\" (UniqueName: \"kubernetes.io/projected/590c61a6-8355-4f4f-be2b-4680745b4732-kube-api-access-qc9n2\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.033142 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/590c61a6-8355-4f4f-be2b-4680745b4732-var-run-ovn\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.033194 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/590c61a6-8355-4f4f-be2b-4680745b4732-combined-ca-bundle\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.033263 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/590c61a6-8355-4f4f-be2b-4680745b4732-ovn-controller-tls-certs\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.032984 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-fxmkt"] Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.070104 4793 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.137401 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/590c61a6-8355-4f4f-be2b-4680745b4732-ovn-controller-tls-certs\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.137496 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/590c61a6-8355-4f4f-be2b-4680745b4732-var-run\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.137525 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/590c61a6-8355-4f4f-be2b-4680745b4732-var-log-ovn\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.137574 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/590c61a6-8355-4f4f-be2b-4680745b4732-scripts\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.137617 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qc9n2\" (UniqueName: \"kubernetes.io/projected/590c61a6-8355-4f4f-be2b-4680745b4732-kube-api-access-qc9n2\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.137642 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/590c61a6-8355-4f4f-be2b-4680745b4732-var-run-ovn\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.137692 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/590c61a6-8355-4f4f-be2b-4680745b4732-combined-ca-bundle\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.138142 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/590c61a6-8355-4f4f-be2b-4680745b4732-var-run\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.138218 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/590c61a6-8355-4f4f-be2b-4680745b4732-var-run-ovn\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.138283 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/590c61a6-8355-4f4f-be2b-4680745b4732-var-log-ovn\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.140173 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/590c61a6-8355-4f4f-be2b-4680745b4732-scripts\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.168596 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qc9n2\" (UniqueName: \"kubernetes.io/projected/590c61a6-8355-4f4f-be2b-4680745b4732-kube-api-access-qc9n2\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.169096 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/590c61a6-8355-4f4f-be2b-4680745b4732-ovn-controller-tls-certs\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.172964 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-rrm85"] Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.175470 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.178722 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/590c61a6-8355-4f4f-be2b-4680745b4732-combined-ca-bundle\") pod \"ovn-controller-fxmkt\" (UID: \"590c61a6-8355-4f4f-be2b-4680745b4732\") " pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.223879 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-rrm85"] Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.316797 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-fxmkt" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.340793 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-scripts\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.341123 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-var-log\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.341405 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzmpk\" (UniqueName: \"kubernetes.io/projected/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-kube-api-access-hzmpk\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.341448 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-var-run\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.341576 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-var-lib\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.341597 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-etc-ovs\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.576177 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzmpk\" (UniqueName: \"kubernetes.io/projected/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-kube-api-access-hzmpk\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.576245 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-var-run\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.576363 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-var-lib\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc 
kubenswrapper[4793]: I0127 20:23:13.576392 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-etc-ovs\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.576446 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-scripts\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.576487 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-var-log\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.576957 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-var-log\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.577357 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-var-run\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.577534 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-var-lib\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.577720 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-etc-ovs\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.580759 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-scripts\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.610625 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzmpk\" (UniqueName: \"kubernetes.io/projected/e8926f6a-2dd1-4b4d-912b-8f11e9c51832-kube-api-access-hzmpk\") pod \"ovn-controller-ovs-rrm85\" (UID: \"e8926f6a-2dd1-4b4d-912b-8f11e9c51832\") " pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.625532 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:23:13 crc kubenswrapper[4793]: I0127 20:23:13.934658 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"58b757ab-b790-4b71-888f-49d52dc5e80d","Type":"ContainerStarted","Data":"e02baa07974a26fa24c1db4eddba5dd64c74e6f6ce79d96ff73373a40c9e27da"} Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.647392 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.650917 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.674943 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.678207 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.679872 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.680101 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.680275 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.680378 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.801697 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.801972 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-h8vj4" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.805488 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.919576 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.919654 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a92de854-8671-4eba-9b5d-1a749083f30b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.919724 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: 
\"kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.919757 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-config\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.919799 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.919826 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a290e10d-fe47-4a6c-bb5c-e5306fefd090-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.919851 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.919896 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5hnc\" (UniqueName: \"kubernetes.io/projected/a290e10d-fe47-4a6c-bb5c-e5306fefd090-kube-api-access-w5hnc\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.919953 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:14 crc kubenswrapper[4793]: I0127 20:23:14.920950 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a290e10d-fe47-4a6c-bb5c-e5306fefd090-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.100983 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 
20:23:15.102185 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a290e10d-fe47-4a6c-bb5c-e5306fefd090-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.102282 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.102329 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a92de854-8671-4eba-9b5d-1a749083f30b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.102383 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.102418 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-config\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.102468 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a290e10d-fe47-4a6c-bb5c-e5306fefd090-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.102498 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.102536 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.102633 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5hnc\" (UniqueName: \"kubernetes.io/projected/a290e10d-fe47-4a6c-bb5c-e5306fefd090-kube-api-access-w5hnc\") pod \"prometheus-metric-storage-0\" (UID: 
\"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.103836 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.104498 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.104511 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.115853 4793 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.115897 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a92de854-8671-4eba-9b5d-1a749083f30b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8312f000923a56e203c1e13376862fe23daea3f78fa537b754b51784691fd00c/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.116255 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a290e10d-fe47-4a6c-bb5c-e5306fefd090-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.116290 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-config\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.117360 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a290e10d-fe47-4a6c-bb5c-e5306fefd090-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.119527 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-thanos-prometheus-http-client-file\") pod 
\"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.159416 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.170838 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5hnc\" (UniqueName: \"kubernetes.io/projected/a290e10d-fe47-4a6c-bb5c-e5306fefd090-kube-api-access-w5hnc\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.173501 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-fxmkt"] Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.217575 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a92de854-8671-4eba-9b5d-1a749083f30b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") pod \"prometheus-metric-storage-0\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.279102 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.753173 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-rrm85"] Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.953429 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-v5zns"] Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.959861 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.963168 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.963780 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 27 20:23:15 crc kubenswrapper[4793]: I0127 20:23:15.981122 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-v5zns"] Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.085522 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/dd80fd2c-f25f-4077-8860-e7296040a46f-ovn-rundir\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.085624 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp2zr\" (UniqueName: \"kubernetes.io/projected/dd80fd2c-f25f-4077-8860-e7296040a46f-kube-api-access-rp2zr\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.085699 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd80fd2c-f25f-4077-8860-e7296040a46f-config\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.085764 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/dd80fd2c-f25f-4077-8860-e7296040a46f-ovs-rundir\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.085865 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd80fd2c-f25f-4077-8860-e7296040a46f-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.085915 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd80fd2c-f25f-4077-8860-e7296040a46f-combined-ca-bundle\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.206265 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/dd80fd2c-f25f-4077-8860-e7296040a46f-ovn-rundir\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.206332 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-rp2zr\" (UniqueName: \"kubernetes.io/projected/dd80fd2c-f25f-4077-8860-e7296040a46f-kube-api-access-rp2zr\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.206386 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd80fd2c-f25f-4077-8860-e7296040a46f-config\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.206434 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/dd80fd2c-f25f-4077-8860-e7296040a46f-ovs-rundir\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.206501 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd80fd2c-f25f-4077-8860-e7296040a46f-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.206638 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd80fd2c-f25f-4077-8860-e7296040a46f-combined-ca-bundle\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.215811 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd80fd2c-f25f-4077-8860-e7296040a46f-combined-ca-bundle\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.217574 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/dd80fd2c-f25f-4077-8860-e7296040a46f-ovn-rundir\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.218759 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd80fd2c-f25f-4077-8860-e7296040a46f-config\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.218858 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/dd80fd2c-f25f-4077-8860-e7296040a46f-ovs-rundir\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.223605 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/dd80fd2c-f25f-4077-8860-e7296040a46f-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.332511 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp2zr\" (UniqueName: \"kubernetes.io/projected/dd80fd2c-f25f-4077-8860-e7296040a46f-kube-api-access-rp2zr\") pod \"ovn-controller-metrics-v5zns\" (UID: \"dd80fd2c-f25f-4077-8860-e7296040a46f\") " pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:16 crc kubenswrapper[4793]: I0127 20:23:16.658717 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-v5zns" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.189910 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.191726 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.197964 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.200021 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.200513 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.200728 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-zn452" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.203658 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.623600 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.625669 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.627321 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.628992 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-l58c8" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.629117 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.629293 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.652165 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.684854 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/58054670-14c6-4c95-8791-edb32ef325da-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.685152 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9hbd\" (UniqueName: \"kubernetes.io/projected/58054670-14c6-4c95-8791-edb32ef325da-kube-api-access-c9hbd\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.685261 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/58054670-14c6-4c95-8791-edb32ef325da-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.685382 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.685512 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/58054670-14c6-4c95-8791-edb32ef325da-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.685660 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58054670-14c6-4c95-8791-edb32ef325da-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.685729 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/58054670-14c6-4c95-8791-edb32ef325da-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: 
\"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.685803 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58054670-14c6-4c95-8791-edb32ef325da-config\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.803186 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-config\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.803258 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/58054670-14c6-4c95-8791-edb32ef325da-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.803296 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqlsk\" (UniqueName: \"kubernetes.io/projected/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-kube-api-access-jqlsk\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.803322 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.803366 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9hbd\" (UniqueName: \"kubernetes.io/projected/58054670-14c6-4c95-8791-edb32ef325da-kube-api-access-c9hbd\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.803403 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/58054670-14c6-4c95-8791-edb32ef325da-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.803439 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.803461 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.803495 4793 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.803536 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.803586 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/58054670-14c6-4c95-8791-edb32ef325da-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.803632 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58054670-14c6-4c95-8791-edb32ef325da-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.803656 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/58054670-14c6-4c95-8791-edb32ef325da-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.803694 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.803723 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58054670-14c6-4c95-8791-edb32ef325da-config\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.803756 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.804493 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/58054670-14c6-4c95-8791-edb32ef325da-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.814287 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/58054670-14c6-4c95-8791-edb32ef325da-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: 
\"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.814637 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.852212 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/58054670-14c6-4c95-8791-edb32ef325da-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.859777 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58054670-14c6-4c95-8791-edb32ef325da-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.860848 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58054670-14c6-4c95-8791-edb32ef325da-config\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.868230 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/58054670-14c6-4c95-8791-edb32ef325da-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.869228 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9hbd\" (UniqueName: \"kubernetes.io/projected/58054670-14c6-4c95-8791-edb32ef325da-kube-api-access-c9hbd\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.871749 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"58054670-14c6-4c95-8791-edb32ef325da\") " pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.905131 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.905290 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.905345 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.905403 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-config\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.905439 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqlsk\" (UniqueName: \"kubernetes.io/projected/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-kube-api-access-jqlsk\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.905457 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.905515 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.905533 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.906217 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.993480 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-config\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:17 crc kubenswrapper[4793]: I0127 20:23:17.993618 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:18 crc kubenswrapper[4793]: I0127 20:23:18.441212 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:18 crc kubenswrapper[4793]: I0127 20:23:18.442396 4793 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 27 20:23:18 crc kubenswrapper[4793]: I0127 20:23:18.443369 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:18 crc kubenswrapper[4793]: I0127 20:23:18.462697 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:18 crc kubenswrapper[4793]: I0127 20:23:18.488662 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:18 crc kubenswrapper[4793]: I0127 20:23:18.676387 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:18 crc kubenswrapper[4793]: I0127 20:23:18.838872 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqlsk\" (UniqueName: \"kubernetes.io/projected/6e42416e-bfb3-4e3e-a640-fe1bcbb54928-kube-api-access-jqlsk\") pod \"ovsdbserver-sb-0\" (UID: \"6e42416e-bfb3-4e3e-a640-fe1bcbb54928\") " pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:18 crc kubenswrapper[4793]: W0127 20:23:18.856695 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8926f6a_2dd1_4b4d_912b_8f11e9c51832.slice/crio-1aae26af399a70a70a8a0353ff2ec52189f6db4c4f1b134ffe4d88f68f32b716 WatchSource:0}: Error finding container 1aae26af399a70a70a8a0353ff2ec52189f6db4c4f1b134ffe4d88f68f32b716: Status 404 returned error can't find the container with id 1aae26af399a70a70a8a0353ff2ec52189f6db4c4f1b134ffe4d88f68f32b716 Jan 27 20:23:18 crc kubenswrapper[4793]: I0127 20:23:18.861729 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 27 20:23:19 crc kubenswrapper[4793]: I0127 20:23:19.523094 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fxmkt" event={"ID":"590c61a6-8355-4f4f-be2b-4680745b4732","Type":"ContainerStarted","Data":"57e9326d46d1f859705fa2d02543ba4677ac1723f64b275788a7cef627c5d7d1"} Jan 27 20:23:19 crc kubenswrapper[4793]: I0127 20:23:19.525341 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-rrm85" event={"ID":"e8926f6a-2dd1-4b4d-912b-8f11e9c51832","Type":"ContainerStarted","Data":"1aae26af399a70a70a8a0353ff2ec52189f6db4c4f1b134ffe4d88f68f32b716"} Jan 27 20:23:22 crc kubenswrapper[4793]: I0127 20:23:22.863309 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:23:22 crc kubenswrapper[4793]: I0127 20:23:22.863624 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:23:30 crc kubenswrapper[4793]: I0127 20:23:30.150131 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 27 20:23:34 crc kubenswrapper[4793]: W0127 20:23:34.727385 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda290e10d_fe47_4a6c_bb5c_e5306fefd090.slice/crio-95b8c9d6e3750a3e5652347263911dfc1d993ab9444ada3e03519c7300c56696 WatchSource:0}: Error finding container 95b8c9d6e3750a3e5652347263911dfc1d993ab9444ada3e03519c7300c56696: Status 404 returned error can't find the container with id 95b8c9d6e3750a3e5652347263911dfc1d993ab9444ada3e03519c7300c56696 Jan 27 20:23:35 crc kubenswrapper[4793]: I0127 20:23:35.090438 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a290e10d-fe47-4a6c-bb5c-e5306fefd090","Type":"ContainerStarted","Data":"95b8c9d6e3750a3e5652347263911dfc1d993ab9444ada3e03519c7300c56696"} Jan 27 20:23:35 crc kubenswrapper[4793]: I0127 20:23:35.588196 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 27 20:23:36 crc kubenswrapper[4793]: E0127 20:23:36.066602 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-ovn-base:watcher_latest" Jan 27 20:23:36 crc kubenswrapper[4793]: E0127 20:23:36.066657 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-ovn-base:watcher_latest" Jan 27 20:23:36 crc kubenswrapper[4793]: E0127 20:23:36.066785 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:ovsdb-server-init,Image:38.102.83.195:5001/podified-master-centos10/openstack-ovn-base:watcher_latest,Command:[/usr/local/bin/container-scripts/init-ovsdb-server.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nf8h55ch674h8ch5bbh548h66bh5f6h5fdh55bhcdhd6h68bh5c9h649hd8h58fh675h57bhb6h68fh67fh5dbhc6h58bh5dbh646hf8h57hd4h65fhccq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-ovs,ReadOnly:false,MountPath:/etc/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log,ReadOnly:false,MountPath:/var/log/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-lib,ReadOnly:false,MountPath:/var/lib/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hzmpk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-ovs-rrm85_openstack(e8926f6a-2dd1-4b4d-912b-8f11e9c51832): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:23:36 crc kubenswrapper[4793]: E0127 20:23:36.068645 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-ovs-rrm85" podUID="e8926f6a-2dd1-4b4d-912b-8f11e9c51832" Jan 27 20:23:36 crc kubenswrapper[4793]: E0127 20:23:36.100724 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.195:5001/podified-master-centos10/openstack-ovn-base:watcher_latest\\\"\"" pod="openstack/ovn-controller-ovs-rrm85" podUID="e8926f6a-2dd1-4b4d-912b-8f11e9c51832" Jan 27 20:23:36 crc kubenswrapper[4793]: I0127 20:23:36.490112 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-v5zns"] Jan 27 20:23:42 crc kubenswrapper[4793]: E0127 20:23:42.295265 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest" Jan 27 20:23:42 crc kubenswrapper[4793]: E0127 20:23:42.295799 4793 kuberuntime_image.go:55] "Failed 
to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest" Jan 27 20:23:42 crc kubenswrapper[4793]: E0127 20:23:42.295965 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:38.102.83.195:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fjjfq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(be1afc99-1852-4e3b-a2e7-e9beab138334): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:23:42 crc kubenswrapper[4793]: E0127 20:23:42.297203 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="be1afc99-1852-4e3b-a2e7-e9beab138334" Jan 27 20:23:42 crc kubenswrapper[4793]: E0127 20:23:42.314758 4793 log.go:32] 
"PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest" Jan 27 20:23:42 crc kubenswrapper[4793]: E0127 20:23:42.314812 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest" Jan 27 20:23:42 crc kubenswrapper[4793]: E0127 20:23:42.314941 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:38.102.83.195:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q4s6f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(986a3fd9-9573-46d3-a71d-b3fbe5fd87f7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:23:42 crc kubenswrapper[4793]: E0127 20:23:42.316138 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" Jan 27 20:23:42 crc kubenswrapper[4793]: E0127 20:23:42.687470 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-memcached:watcher_latest" Jan 27 20:23:42 crc kubenswrapper[4793]: E0127 20:23:42.687805 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-memcached:watcher_latest" Jan 27 20:23:42 crc kubenswrapper[4793]: E0127 20:23:42.687986 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:38.102.83.195:5001/podified-master-centos10/openstack-memcached:watcher_latest,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n7bhb6hdh677h647h6fh588hfbh645hd6h58dhf4hf4h594hbhd9hdh5d9h676h554h97h599h568h6bh564h5f7h56fh665hdfh67chbbhd9q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-st72n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 
},Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(da5aef81-265e-457b-bd86-b770db112298): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:23:42 crc kubenswrapper[4793]: E0127 20:23:42.689680 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="da5aef81-265e-457b-bd86-b770db112298" Jan 27 20:23:43 crc kubenswrapper[4793]: I0127 20:23:43.146959 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"58054670-14c6-4c95-8791-edb32ef325da","Type":"ContainerStarted","Data":"6245d4d7f3a1466272f271816d72caa0017644bd3708a20006b67c442589fe2c"} Jan 27 20:23:43 crc kubenswrapper[4793]: I0127 20:23:43.148332 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-v5zns" event={"ID":"dd80fd2c-f25f-4077-8860-e7296040a46f","Type":"ContainerStarted","Data":"291be82eb50cf10a8e4a9c84116ba067d7d37aec33cabf36c5217d473d78a119"} Jan 27 20:23:43 crc kubenswrapper[4793]: E0127 20:23:43.149776 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.195:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest\\\"\"" pod="openstack/rabbitmq-server-0" podUID="986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" Jan 27 20:23:43 crc kubenswrapper[4793]: E0127 20:23:43.150208 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.195:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="be1afc99-1852-4e3b-a2e7-e9beab138334" Jan 27 20:23:43 crc kubenswrapper[4793]: E0127 20:23:43.150326 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.195:5001/podified-master-centos10/openstack-memcached:watcher_latest\\\"\"" pod="openstack/memcached-0" podUID="da5aef81-265e-457b-bd86-b770db112298" Jan 27 20:23:43 crc kubenswrapper[4793]: I0127 20:23:43.429972 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 27 20:23:47 crc kubenswrapper[4793]: E0127 20:23:47.820081 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Jan 27 20:23:47 crc kubenswrapper[4793]: E0127 20:23:47.820376 
4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Jan 27 20:23:47 crc kubenswrapper[4793]: E0127 20:23:47.820476 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-djrr4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-56765d9fd9-65zch_openstack(9c46d06f-cd04-4ccb-9fff-64af1fd94f9c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:23:47 crc kubenswrapper[4793]: E0127 20:23:47.821810 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-56765d9fd9-65zch" podUID="9c46d06f-cd04-4ccb-9fff-64af1fd94f9c" Jan 27 20:23:48 crc kubenswrapper[4793]: W0127 20:23:48.131476 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6e42416e_bfb3_4e3e_a640_fe1bcbb54928.slice/crio-8d48f351e853e49ff00ba7f63c11b0fba2f24c638b21026e949b2e6454db0118 WatchSource:0}: Error finding container 8d48f351e853e49ff00ba7f63c11b0fba2f24c638b21026e949b2e6454db0118: Status 404 returned error can't find the container with id 8d48f351e853e49ff00ba7f63c11b0fba2f24c638b21026e949b2e6454db0118 Jan 27 20:23:48 crc kubenswrapper[4793]: I0127 20:23:48.185664 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"6e42416e-bfb3-4e3e-a640-fe1bcbb54928","Type":"ContainerStarted","Data":"8d48f351e853e49ff00ba7f63c11b0fba2f24c638b21026e949b2e6454db0118"} Jan 27 20:23:48 crc kubenswrapper[4793]: E0127 20:23:48.214746 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Jan 27 20:23:48 crc kubenswrapper[4793]: E0127 20:23:48.214789 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Jan 27 20:23:48 crc kubenswrapper[4793]: E0127 20:23:48.214880 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ts2dr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-7c79f67885-95t8g_openstack(be27386e-67ce-4b3d-8db8-77df553b6b9c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:23:48 crc kubenswrapper[4793]: E0127 20:23:48.216180 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-7c79f67885-95t8g" podUID="be27386e-67ce-4b3d-8db8-77df553b6b9c" Jan 27 20:23:48 crc kubenswrapper[4793]: E0127 20:23:48.222963 
4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Jan 27 20:23:48 crc kubenswrapper[4793]: E0127 20:23:48.223010 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Jan 27 20:23:48 crc kubenswrapper[4793]: E0127 20:23:48.223130 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-64jbz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-7899f7d95c-t6js7_openstack(9070a599-5105-4e4b-85df-dde3b1885bcb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:23:48 crc kubenswrapper[4793]: E0127 20:23:48.224349 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-7899f7d95c-t6js7" podUID="9070a599-5105-4e4b-85df-dde3b1885bcb" Jan 27 20:23:48 crc kubenswrapper[4793]: E0127 20:23:48.268685 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" 
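
The burst of ErrImagePull / ImagePullBackOff events above (the ovsdb-server-init, rabbitmq setup-container, memcached, and dnsmasq init containers all fail pulling from 38.102.83.195:5001 with "copying config: context canceled") can also be cross-checked from the API side rather than by grepping kubelet output. Below is a minimal client-go sketch, not part of the captured log, that lists pods in the openstack namespace whose containers are stuck waiting on an image pull; the kubeconfig path is a placeholder assumption, not taken from this log.

    package main

    import (
        "context"
        "fmt"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Placeholder kubeconfig location (assumption); adjust for the CRC host.
        cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
        if err != nil {
            panic(err)
        }
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        pods, err := cs.CoreV1().Pods("openstack").List(context.TODO(), metav1.ListOptions{})
        if err != nil {
            panic(err)
        }
        for _, p := range pods.Items {
            // Init containers (e.g. ovsdb-server-init, setup-container) fail the
            // same way as app containers, but kubelet reports them under
            // InitContainerStatuses, so inspect both status lists.
            var statuses []corev1.ContainerStatus
            statuses = append(statuses, p.Status.InitContainerStatuses...)
            statuses = append(statuses, p.Status.ContainerStatuses...)
            for _, s := range statuses {
                if w := s.State.Waiting; w != nil &&
                    (w.Reason == "ErrImagePull" || w.Reason == "ImagePullBackOff") {
                    fmt.Printf("%s/%s: %s image=%s\n", p.Name, s.Name, w.Reason, s.Image)
                }
            }
        }
    }

A plain "kubectl get pods -n openstack" would surface the same Waiting reasons in its STATUS column; the sketch only makes the init-container case explicit, which matters for the pods in this log since the failures occur before the main containers ever start.
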
Jan 27 20:23:48 crc kubenswrapper[4793]: E0127 20:23:48.268754 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Jan 27 20:23:48 crc kubenswrapper[4793]: E0127 20:23:48.268899 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-82mxw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-6cb4b66457-bwhxx_openstack(39b624e5-100f-4b63-9afb-e84133bcfaa6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:23:48 crc kubenswrapper[4793]: E0127 20:23:48.270120 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Jan 27 20:23:48 crc kubenswrapper[4793]: E0127 20:23:48.270181 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Jan 27 20:23:48 crc kubenswrapper[4793]: E0127 20:23:48.270136 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack/dnsmasq-dns-6cb4b66457-bwhxx" podUID="39b624e5-100f-4b63-9afb-e84133bcfaa6" Jan 27 20:23:48 crc kubenswrapper[4793]: E0127 20:23:48.270326 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5c7h56dh5cfh8bh54fhbbhf4h5b9hdch67fhd7h55fh55fh6ch9h548h54ch665h647h6h8fhd6h5dfh5cdh58bh577h66fh695h5fbh55h77h5fcq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-47vfj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-77dcc586cc-dt95k_openstack(45bc2ffa-4a80-415c-abc3-3aa3d336c4fc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:23:48 crc kubenswrapper[4793]: E0127 20:23:48.271492 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-77dcc586cc-dt95k" podUID="45bc2ffa-4a80-415c-abc3-3aa3d336c4fc" Jan 27 20:23:48 crc kubenswrapper[4793]: I0127 20:23:48.478670 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56765d9fd9-65zch" Jan 27 20:23:48 crc kubenswrapper[4793]: I0127 20:23:48.616742 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c46d06f-cd04-4ccb-9fff-64af1fd94f9c-config\") pod \"9c46d06f-cd04-4ccb-9fff-64af1fd94f9c\" (UID: \"9c46d06f-cd04-4ccb-9fff-64af1fd94f9c\") " Jan 27 20:23:48 crc kubenswrapper[4793]: I0127 20:23:48.616884 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djrr4\" (UniqueName: \"kubernetes.io/projected/9c46d06f-cd04-4ccb-9fff-64af1fd94f9c-kube-api-access-djrr4\") pod \"9c46d06f-cd04-4ccb-9fff-64af1fd94f9c\" (UID: \"9c46d06f-cd04-4ccb-9fff-64af1fd94f9c\") " Jan 27 20:23:48 crc kubenswrapper[4793]: I0127 20:23:48.617406 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c46d06f-cd04-4ccb-9fff-64af1fd94f9c-config" (OuterVolumeSpecName: "config") pod "9c46d06f-cd04-4ccb-9fff-64af1fd94f9c" (UID: "9c46d06f-cd04-4ccb-9fff-64af1fd94f9c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:23:48 crc kubenswrapper[4793]: I0127 20:23:48.621468 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c46d06f-cd04-4ccb-9fff-64af1fd94f9c-kube-api-access-djrr4" (OuterVolumeSpecName: "kube-api-access-djrr4") pod "9c46d06f-cd04-4ccb-9fff-64af1fd94f9c" (UID: "9c46d06f-cd04-4ccb-9fff-64af1fd94f9c"). InnerVolumeSpecName "kube-api-access-djrr4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:23:48 crc kubenswrapper[4793]: I0127 20:23:48.718467 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c46d06f-cd04-4ccb-9fff-64af1fd94f9c-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:23:48 crc kubenswrapper[4793]: I0127 20:23:48.718498 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djrr4\" (UniqueName: \"kubernetes.io/projected/9c46d06f-cd04-4ccb-9fff-64af1fd94f9c-kube-api-access-djrr4\") on node \"crc\" DevicePath \"\"" Jan 27 20:23:49 crc kubenswrapper[4793]: I0127 20:23:49.194309 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56765d9fd9-65zch" event={"ID":"9c46d06f-cd04-4ccb-9fff-64af1fd94f9c","Type":"ContainerDied","Data":"c5258cca7f5dbf7eb7c9f07b04160eacba82539a67e61f0e72f85e17199c3c2c"} Jan 27 20:23:49 crc kubenswrapper[4793]: I0127 20:23:49.194390 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56765d9fd9-65zch" Jan 27 20:23:49 crc kubenswrapper[4793]: E0127 20:23:49.196818 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest\\\"\"" pod="openstack/dnsmasq-dns-7899f7d95c-t6js7" podUID="9070a599-5105-4e4b-85df-dde3b1885bcb" Jan 27 20:23:49 crc kubenswrapper[4793]: E0127 20:23:49.197483 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.195:5001/podified-master-centos10/openstack-neutron-server:watcher_latest\\\"\"" pod="openstack/dnsmasq-dns-77dcc586cc-dt95k" podUID="45bc2ffa-4a80-415c-abc3-3aa3d336c4fc" Jan 27 20:23:49 crc kubenswrapper[4793]: I0127 20:23:49.350275 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56765d9fd9-65zch"] Jan 27 20:23:49 crc kubenswrapper[4793]: I0127 20:23:49.365967 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56765d9fd9-65zch"] Jan 27 20:23:49 crc kubenswrapper[4793]: E0127 20:23:49.684806 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Jan 27 20:23:49 crc kubenswrapper[4793]: E0127 20:23:49.684877 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Jan 27 20:23:49 crc kubenswrapper[4793]: E0127 20:23:49.685023 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bwk9t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(58b757ab-b790-4b71-888f-49d52dc5e80d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 27 20:23:49 crc kubenswrapper[4793]: E0127 20:23:49.686437 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="58b757ab-b790-4b71-888f-49d52dc5e80d"
Jan 27 20:23:49 crc kubenswrapper[4793]: I0127 20:23:49.813378 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c46d06f-cd04-4ccb-9fff-64af1fd94f9c" path="/var/lib/kubelet/pods/9c46d06f-cd04-4ccb-9fff-64af1fd94f9c/volumes"
Jan 27 20:23:50 crc kubenswrapper[4793]: E0127 20:23:50.204067 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="58b757ab-b790-4b71-888f-49d52dc5e80d"
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.729133 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c79f67885-95t8g"
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.739857 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cb4b66457-bwhxx"
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.859787 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39b624e5-100f-4b63-9afb-e84133bcfaa6-config\") pod \"39b624e5-100f-4b63-9afb-e84133bcfaa6\" (UID: \"39b624e5-100f-4b63-9afb-e84133bcfaa6\") "
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.859922 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82mxw\" (UniqueName: \"kubernetes.io/projected/39b624e5-100f-4b63-9afb-e84133bcfaa6-kube-api-access-82mxw\") pod \"39b624e5-100f-4b63-9afb-e84133bcfaa6\" (UID: \"39b624e5-100f-4b63-9afb-e84133bcfaa6\") "
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.860036 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be27386e-67ce-4b3d-8db8-77df553b6b9c-dns-svc\") pod \"be27386e-67ce-4b3d-8db8-77df553b6b9c\" (UID: \"be27386e-67ce-4b3d-8db8-77df553b6b9c\") "
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.860073 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/39b624e5-100f-4b63-9afb-e84133bcfaa6-dns-svc\") pod \"39b624e5-100f-4b63-9afb-e84133bcfaa6\" (UID: \"39b624e5-100f-4b63-9afb-e84133bcfaa6\") "
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.860096 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ts2dr\" (UniqueName: \"kubernetes.io/projected/be27386e-67ce-4b3d-8db8-77df553b6b9c-kube-api-access-ts2dr\") pod \"be27386e-67ce-4b3d-8db8-77df553b6b9c\" (UID: \"be27386e-67ce-4b3d-8db8-77df553b6b9c\") "
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.860134 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be27386e-67ce-4b3d-8db8-77df553b6b9c-config\") pod \"be27386e-67ce-4b3d-8db8-77df553b6b9c\" (UID: \"be27386e-67ce-4b3d-8db8-77df553b6b9c\") "
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.860430 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39b624e5-100f-4b63-9afb-e84133bcfaa6-config" (OuterVolumeSpecName: "config") pod "39b624e5-100f-4b63-9afb-e84133bcfaa6" (UID: "39b624e5-100f-4b63-9afb-e84133bcfaa6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.860963 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39b624e5-100f-4b63-9afb-e84133bcfaa6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "39b624e5-100f-4b63-9afb-e84133bcfaa6" (UID: "39b624e5-100f-4b63-9afb-e84133bcfaa6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.861063 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be27386e-67ce-4b3d-8db8-77df553b6b9c-config" (OuterVolumeSpecName: "config") pod "be27386e-67ce-4b3d-8db8-77df553b6b9c" (UID: "be27386e-67ce-4b3d-8db8-77df553b6b9c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.861330 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be27386e-67ce-4b3d-8db8-77df553b6b9c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "be27386e-67ce-4b3d-8db8-77df553b6b9c" (UID: "be27386e-67ce-4b3d-8db8-77df553b6b9c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.864429 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39b624e5-100f-4b63-9afb-e84133bcfaa6-kube-api-access-82mxw" (OuterVolumeSpecName: "kube-api-access-82mxw") pod "39b624e5-100f-4b63-9afb-e84133bcfaa6" (UID: "39b624e5-100f-4b63-9afb-e84133bcfaa6"). InnerVolumeSpecName "kube-api-access-82mxw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.864691 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be27386e-67ce-4b3d-8db8-77df553b6b9c-kube-api-access-ts2dr" (OuterVolumeSpecName: "kube-api-access-ts2dr") pod "be27386e-67ce-4b3d-8db8-77df553b6b9c" (UID: "be27386e-67ce-4b3d-8db8-77df553b6b9c"). InnerVolumeSpecName "kube-api-access-ts2dr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.962346 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82mxw\" (UniqueName: \"kubernetes.io/projected/39b624e5-100f-4b63-9afb-e84133bcfaa6-kube-api-access-82mxw\") on node \"crc\" DevicePath \"\""
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.962384 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be27386e-67ce-4b3d-8db8-77df553b6b9c-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.962398 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/39b624e5-100f-4b63-9afb-e84133bcfaa6-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.962410 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ts2dr\" (UniqueName: \"kubernetes.io/projected/be27386e-67ce-4b3d-8db8-77df553b6b9c-kube-api-access-ts2dr\") on node \"crc\" DevicePath \"\""
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.962423 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be27386e-67ce-4b3d-8db8-77df553b6b9c-config\") on node \"crc\" DevicePath \"\""
Jan 27 20:23:50 crc kubenswrapper[4793]: I0127 20:23:50.962433 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39b624e5-100f-4b63-9afb-e84133bcfaa6-config\") on node \"crc\" DevicePath \"\""
Jan 27 20:23:51 crc kubenswrapper[4793]: I0127 20:23:51.210799 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cb4b66457-bwhxx" event={"ID":"39b624e5-100f-4b63-9afb-e84133bcfaa6","Type":"ContainerDied","Data":"407098aef276ec72c9bb417a9316e709d714b7e43b2dc28cdf0ab4e15834f529"}
Jan 27 20:23:51 crc kubenswrapper[4793]: I0127 20:23:51.210836 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cb4b66457-bwhxx"
Jan 27 20:23:51 crc kubenswrapper[4793]: I0127 20:23:51.212325 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c79f67885-95t8g" event={"ID":"be27386e-67ce-4b3d-8db8-77df553b6b9c","Type":"ContainerDied","Data":"ae8802252c6cbc67fc2577492cc2ce47eb274979a83595172429f4c5ba20809b"}
Jan 27 20:23:51 crc kubenswrapper[4793]: I0127 20:23:51.212404 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c79f67885-95t8g"
Jan 27 20:23:51 crc kubenswrapper[4793]: I0127 20:23:51.345715 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cb4b66457-bwhxx"]
Jan 27 20:23:51 crc kubenswrapper[4793]: I0127 20:23:51.353110 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6cb4b66457-bwhxx"]
Jan 27 20:23:51 crc kubenswrapper[4793]: I0127 20:23:51.382403 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c79f67885-95t8g"]
Jan 27 20:23:51 crc kubenswrapper[4793]: I0127 20:23:51.412465 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c79f67885-95t8g"]
Jan 27 20:23:51 crc kubenswrapper[4793]: I0127 20:23:51.812170 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39b624e5-100f-4b63-9afb-e84133bcfaa6" path="/var/lib/kubelet/pods/39b624e5-100f-4b63-9afb-e84133bcfaa6/volumes"
Jan 27 20:23:51 crc kubenswrapper[4793]: I0127 20:23:51.812590 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be27386e-67ce-4b3d-8db8-77df553b6b9c" path="/var/lib/kubelet/pods/be27386e-67ce-4b3d-8db8-77df553b6b9c/volumes"
Jan 27 20:23:52 crc kubenswrapper[4793]: I0127 20:23:52.224834 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fxmkt" event={"ID":"590c61a6-8355-4f4f-be2b-4680745b4732","Type":"ContainerStarted","Data":"16336761937ba65825ef34d318f49ec516956f28179cf40495b3cf3c7a7e4b68"}
Jan 27 20:23:52 crc kubenswrapper[4793]: I0127 20:23:52.225224 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-fxmkt"
Jan 27 20:23:52 crc kubenswrapper[4793]: I0127 20:23:52.227677 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"58054670-14c6-4c95-8791-edb32ef325da","Type":"ContainerStarted","Data":"20cbad4a2fea0bd38ea92e31e774a901f08413ad092fba219c08136940c44158"}
Jan 27 20:23:52 crc kubenswrapper[4793]: I0127 20:23:52.229714 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"cd405c61-4515-4524-8b4c-c30fcc225b3b","Type":"ContainerStarted","Data":"c019218c7ff62eec93e4e65050f60e4e0fd4ff1e8d3c72db1bc218646478bf5d"}
Jan 27 20:23:52 crc kubenswrapper[4793]: I0127 20:23:52.231303 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974","Type":"ContainerStarted","Data":"79c15fb06d8895bdca3f4d7abeab791e7d2b3ae90671f9dbdabae39aa70f1163"}
Jan 27 20:23:52 crc kubenswrapper[4793]: I0127 20:23:52.233133 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"6e42416e-bfb3-4e3e-a640-fe1bcbb54928","Type":"ContainerStarted","Data":"fdbda020d26d6fbcdb6ed15dbb52718cf17d157d5f5abac9f5b312bbb45612a8"}
Jan 27 20:23:52 crc kubenswrapper[4793]: I0127 20:23:52.251413 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-fxmkt" podStartSLOduration=10.691031322 podStartE2EDuration="40.251392663s" podCreationTimestamp="2026-01-27 20:23:12 +0000 UTC" firstStartedPulling="2026-01-27 20:23:18.866957226 +0000 UTC m=+1224.257210422" lastFinishedPulling="2026-01-27 20:23:48.427318607 +0000 UTC m=+1253.817571763" observedRunningTime="2026-01-27 20:23:52.251087385 +0000 UTC m=+1257.641340541" watchObservedRunningTime="2026-01-27 20:23:52.251392663 +0000 UTC m=+1257.641645819"
Jan 27 20:23:52 crc kubenswrapper[4793]: I0127 20:23:52.753431 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 20:23:52 crc kubenswrapper[4793]: I0127 20:23:52.753728 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 20:23:52 crc kubenswrapper[4793]: I0127 20:23:52.753838 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn"
Jan 27 20:23:52 crc kubenswrapper[4793]: I0127 20:23:52.754682 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c284439d655dda73540e21780ff150cab3f56b6703697582fed54f934aaea296"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 27 20:23:52 crc kubenswrapper[4793]: I0127 20:23:52.754827 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://c284439d655dda73540e21780ff150cab3f56b6703697582fed54f934aaea296" gracePeriod=600
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.244569 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"58054670-14c6-4c95-8791-edb32ef325da","Type":"ContainerStarted","Data":"bbeb6033429579578942438ec25f77afab51b121f00e91e0a387984c08cfb3b5"}
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.248712 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-v5zns" event={"ID":"dd80fd2c-f25f-4077-8860-e7296040a46f","Type":"ContainerStarted","Data":"1e34ce2c3710334fd054b8ae7c888eb0787dd0bbd858c2a9d818b632d9e8333e"}
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.256927 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="c284439d655dda73540e21780ff150cab3f56b6703697582fed54f934aaea296" exitCode=0
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.257011 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"c284439d655dda73540e21780ff150cab3f56b6703697582fed54f934aaea296"}
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.257048 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"fdd2916150f8a42b633c00852950908ff71fd3b561d65e72206d0f902e9390f6"}
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.257069 4793 scope.go:117] "RemoveContainer" containerID="c041cd73cb3eb270167a956b00348c4d59b0c9f650876c07addb86b8623f031a"
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.260182 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"6e42416e-bfb3-4e3e-a640-fe1bcbb54928","Type":"ContainerStarted","Data":"f2bc8bcc8e5d368fb0847d1fd370a589d08ebfb8244f1928ddde0fb3e6fe39e9"}
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.266447 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"d13b401d-8f36-4677-b782-ebf9a3d5daab","Type":"ContainerStarted","Data":"8c2bb6675873503ce7043b2eb6c32deb18600689c3d2411aad6191687007c533"}
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.269860 4793 generic.go:334] "Generic (PLEG): container finished" podID="e8926f6a-2dd1-4b4d-912b-8f11e9c51832" containerID="7603580d9c8099c7eb2abf27ae9eb75934e23836d3a2bffe7b66b825ff846d7e" exitCode=0
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.269926 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-rrm85" event={"ID":"e8926f6a-2dd1-4b4d-912b-8f11e9c51832","Type":"ContainerDied","Data":"7603580d9c8099c7eb2abf27ae9eb75934e23836d3a2bffe7b66b825ff846d7e"}
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.270978 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=29.30973454 podStartE2EDuration="37.270957593s" podCreationTimestamp="2026-01-27 20:23:16 +0000 UTC" firstStartedPulling="2026-01-27 20:23:42.679508627 +0000 UTC m=+1248.069761783" lastFinishedPulling="2026-01-27 20:23:50.64073168 +0000 UTC m=+1256.030984836" observedRunningTime="2026-01-27 20:23:53.2643206 +0000 UTC m=+1258.654573756" watchObservedRunningTime="2026-01-27 20:23:53.270957593 +0000 UTC m=+1258.661210739"
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.291268 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=34.360030913 podStartE2EDuration="37.291248172s" podCreationTimestamp="2026-01-27 20:23:16 +0000 UTC" firstStartedPulling="2026-01-27 20:23:48.139153447 +0000 UTC m=+1253.529406603" lastFinishedPulling="2026-01-27 20:23:51.070370706 +0000 UTC m=+1256.460623862" observedRunningTime="2026-01-27 20:23:53.285801548 +0000 UTC m=+1258.676054704" watchObservedRunningTime="2026-01-27 20:23:53.291248172 +0000 UTC m=+1258.681501328"
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.326044 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-v5zns" podStartSLOduration=30.147779471 podStartE2EDuration="38.326024426s" podCreationTimestamp="2026-01-27 20:23:15 +0000 UTC" firstStartedPulling="2026-01-27 20:23:42.991776309 +0000 UTC m=+1248.382029465" lastFinishedPulling="2026-01-27 20:23:51.170021264 +0000 UTC m=+1256.560274420" observedRunningTime="2026-01-27 20:23:53.322070539 +0000 UTC m=+1258.712323715" watchObservedRunningTime="2026-01-27 20:23:53.326024426 +0000 UTC m=+1258.716277582"
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.444042 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.828034 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7899f7d95c-t6js7"]
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.848437 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78c56b79fc-tv48b"]
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.858525 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78c56b79fc-tv48b"
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.861940 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.863346 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0"
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.897723 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78c56b79fc-tv48b"]
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.981719 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-ovsdbserver-nb\") pod \"dnsmasq-dns-78c56b79fc-tv48b\" (UID: \"262c27f3-d284-4ae0-ac78-6821ec08221b\") " pod="openstack/dnsmasq-dns-78c56b79fc-tv48b"
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.981971 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-dns-svc\") pod \"dnsmasq-dns-78c56b79fc-tv48b\" (UID: \"262c27f3-d284-4ae0-ac78-6821ec08221b\") " pod="openstack/dnsmasq-dns-78c56b79fc-tv48b"
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.982021 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-config\") pod \"dnsmasq-dns-78c56b79fc-tv48b\" (UID: \"262c27f3-d284-4ae0-ac78-6821ec08221b\") " pod="openstack/dnsmasq-dns-78c56b79fc-tv48b"
Jan 27 20:23:53 crc kubenswrapper[4793]: I0127 20:23:53.982051 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cv5hd\" (UniqueName: \"kubernetes.io/projected/262c27f3-d284-4ae0-ac78-6821ec08221b-kube-api-access-cv5hd\") pod \"dnsmasq-dns-78c56b79fc-tv48b\" (UID: \"262c27f3-d284-4ae0-ac78-6821ec08221b\") " pod="openstack/dnsmasq-dns-78c56b79fc-tv48b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.079088 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77dcc586cc-dt95k"]
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.087713 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-dns-svc\") pod \"dnsmasq-dns-78c56b79fc-tv48b\" (UID: \"262c27f3-d284-4ae0-ac78-6821ec08221b\") " pod="openstack/dnsmasq-dns-78c56b79fc-tv48b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.088090 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-config\") pod \"dnsmasq-dns-78c56b79fc-tv48b\" (UID: \"262c27f3-d284-4ae0-ac78-6821ec08221b\") " pod="openstack/dnsmasq-dns-78c56b79fc-tv48b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.088120 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cv5hd\" (UniqueName: \"kubernetes.io/projected/262c27f3-d284-4ae0-ac78-6821ec08221b-kube-api-access-cv5hd\") pod \"dnsmasq-dns-78c56b79fc-tv48b\" (UID: \"262c27f3-d284-4ae0-ac78-6821ec08221b\") " pod="openstack/dnsmasq-dns-78c56b79fc-tv48b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.088200 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-ovsdbserver-nb\") pod \"dnsmasq-dns-78c56b79fc-tv48b\" (UID: \"262c27f3-d284-4ae0-ac78-6821ec08221b\") " pod="openstack/dnsmasq-dns-78c56b79fc-tv48b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.089174 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-config\") pod \"dnsmasq-dns-78c56b79fc-tv48b\" (UID: \"262c27f3-d284-4ae0-ac78-6821ec08221b\") " pod="openstack/dnsmasq-dns-78c56b79fc-tv48b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.089222 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-ovsdbserver-nb\") pod \"dnsmasq-dns-78c56b79fc-tv48b\" (UID: \"262c27f3-d284-4ae0-ac78-6821ec08221b\") " pod="openstack/dnsmasq-dns-78c56b79fc-tv48b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.089825 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-dns-svc\") pod \"dnsmasq-dns-78c56b79fc-tv48b\" (UID: \"262c27f3-d284-4ae0-ac78-6821ec08221b\") " pod="openstack/dnsmasq-dns-78c56b79fc-tv48b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.152971 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cv5hd\" (UniqueName: \"kubernetes.io/projected/262c27f3-d284-4ae0-ac78-6821ec08221b-kube-api-access-cv5hd\") pod \"dnsmasq-dns-78c56b79fc-tv48b\" (UID: \"262c27f3-d284-4ae0-ac78-6821ec08221b\") " pod="openstack/dnsmasq-dns-78c56b79fc-tv48b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.182077 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cb456c8f9-mxg2b"]
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.184057 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.186475 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.190845 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78c56b79fc-tv48b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.193705 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cb456c8f9-mxg2b"]
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.285951 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-rrm85" event={"ID":"e8926f6a-2dd1-4b4d-912b-8f11e9c51832","Type":"ContainerStarted","Data":"c044d8fb0057cb4f03e93a2039d04b48b1055aa62c15d5ea8b68e68ce9746721"}
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.289595 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7899f7d95c-t6js7" event={"ID":"9070a599-5105-4e4b-85df-dde3b1885bcb","Type":"ContainerDied","Data":"aa075f0e31d487a4191f4ac6a90776d9833b9e2d29f4689a68402ee04e9efcd9"}
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.289636 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aa075f0e31d487a4191f4ac6a90776d9833b9e2d29f4689a68402ee04e9efcd9"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.290437 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-config\") pod \"dnsmasq-dns-cb456c8f9-mxg2b\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.290505 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8dwv\" (UniqueName: \"kubernetes.io/projected/2f32f180-5e5c-4b65-a124-684e2620b221-kube-api-access-x8dwv\") pod \"dnsmasq-dns-cb456c8f9-mxg2b\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.290599 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-dns-svc\") pod \"dnsmasq-dns-cb456c8f9-mxg2b\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.290678 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-ovsdbserver-nb\") pod \"dnsmasq-dns-cb456c8f9-mxg2b\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.290701 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-ovsdbserver-sb\") pod \"dnsmasq-dns-cb456c8f9-mxg2b\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.303114 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a290e10d-fe47-4a6c-bb5c-e5306fefd090","Type":"ContainerStarted","Data":"2f5bc73680dbd5086bded0a32b53b08a40c7302f3f3ec85df280dc0f533103f9"}
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.353276 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7899f7d95c-t6js7"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.391845 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9070a599-5105-4e4b-85df-dde3b1885bcb-dns-svc\") pod \"9070a599-5105-4e4b-85df-dde3b1885bcb\" (UID: \"9070a599-5105-4e4b-85df-dde3b1885bcb\") "
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.391947 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9070a599-5105-4e4b-85df-dde3b1885bcb-config\") pod \"9070a599-5105-4e4b-85df-dde3b1885bcb\" (UID: \"9070a599-5105-4e4b-85df-dde3b1885bcb\") "
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.392165 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64jbz\" (UniqueName: \"kubernetes.io/projected/9070a599-5105-4e4b-85df-dde3b1885bcb-kube-api-access-64jbz\") pod \"9070a599-5105-4e4b-85df-dde3b1885bcb\" (UID: \"9070a599-5105-4e4b-85df-dde3b1885bcb\") "
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.392520 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-dns-svc\") pod \"dnsmasq-dns-cb456c8f9-mxg2b\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.392657 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-ovsdbserver-nb\") pod \"dnsmasq-dns-cb456c8f9-mxg2b\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.392690 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-ovsdbserver-sb\") pod \"dnsmasq-dns-cb456c8f9-mxg2b\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.393009 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-config\") pod \"dnsmasq-dns-cb456c8f9-mxg2b\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.393211 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8dwv\" (UniqueName: \"kubernetes.io/projected/2f32f180-5e5c-4b65-a124-684e2620b221-kube-api-access-x8dwv\") pod \"dnsmasq-dns-cb456c8f9-mxg2b\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.393581 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9070a599-5105-4e4b-85df-dde3b1885bcb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9070a599-5105-4e4b-85df-dde3b1885bcb" (UID: "9070a599-5105-4e4b-85df-dde3b1885bcb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.394348 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9070a599-5105-4e4b-85df-dde3b1885bcb-config" (OuterVolumeSpecName: "config") pod "9070a599-5105-4e4b-85df-dde3b1885bcb" (UID: "9070a599-5105-4e4b-85df-dde3b1885bcb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.395742 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-ovsdbserver-nb\") pod \"dnsmasq-dns-cb456c8f9-mxg2b\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.396386 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-ovsdbserver-sb\") pod \"dnsmasq-dns-cb456c8f9-mxg2b\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.396927 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-dns-svc\") pod \"dnsmasq-dns-cb456c8f9-mxg2b\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.399917 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-config\") pod \"dnsmasq-dns-cb456c8f9-mxg2b\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.411930 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9070a599-5105-4e4b-85df-dde3b1885bcb-kube-api-access-64jbz" (OuterVolumeSpecName: "kube-api-access-64jbz") pod "9070a599-5105-4e4b-85df-dde3b1885bcb" (UID: "9070a599-5105-4e4b-85df-dde3b1885bcb"). InnerVolumeSpecName "kube-api-access-64jbz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.418034 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8dwv\" (UniqueName: \"kubernetes.io/projected/2f32f180-5e5c-4b65-a124-684e2620b221-kube-api-access-x8dwv\") pod \"dnsmasq-dns-cb456c8f9-mxg2b\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.443842 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.456059 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77dcc586cc-dt95k"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.487318 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.494147 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-47vfj\" (UniqueName: \"kubernetes.io/projected/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-kube-api-access-47vfj\") pod \"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc\" (UID: \"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc\") "
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.494273 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-dns-svc\") pod \"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc\" (UID: \"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc\") "
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.494419 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-config\") pod \"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc\" (UID: \"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc\") "
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.494852 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64jbz\" (UniqueName: \"kubernetes.io/projected/9070a599-5105-4e4b-85df-dde3b1885bcb-kube-api-access-64jbz\") on node \"crc\" DevicePath \"\""
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.494873 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9070a599-5105-4e4b-85df-dde3b1885bcb-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.494907 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9070a599-5105-4e4b-85df-dde3b1885bcb-config\") on node \"crc\" DevicePath \"\""
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.494829 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "45bc2ffa-4a80-415c-abc3-3aa3d336c4fc" (UID: "45bc2ffa-4a80-415c-abc3-3aa3d336c4fc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.495027 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-config" (OuterVolumeSpecName: "config") pod "45bc2ffa-4a80-415c-abc3-3aa3d336c4fc" (UID: "45bc2ffa-4a80-415c-abc3-3aa3d336c4fc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.498963 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-kube-api-access-47vfj" (OuterVolumeSpecName: "kube-api-access-47vfj") pod "45bc2ffa-4a80-415c-abc3-3aa3d336c4fc" (UID: "45bc2ffa-4a80-415c-abc3-3aa3d336c4fc"). InnerVolumeSpecName "kube-api-access-47vfj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.597138 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-config\") on node \"crc\" DevicePath \"\""
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.597185 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-47vfj\" (UniqueName: \"kubernetes.io/projected/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-kube-api-access-47vfj\") on node \"crc\" DevicePath \"\""
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.597204 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.651636 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.776565 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78c56b79fc-tv48b"]
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.863469 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0"
Jan 27 20:23:54 crc kubenswrapper[4793]: I0127 20:23:54.925427 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0"
Jan 27 20:23:55 crc kubenswrapper[4793]: I0127 20:23:55.130520 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cb456c8f9-mxg2b"]
Jan 27 20:23:55 crc kubenswrapper[4793]: W0127 20:23:55.134398 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2f32f180_5e5c_4b65_a124_684e2620b221.slice/crio-969a996d06760d3e79745e3324a5b356335d50dacd9ef3ba6d0aa8026a155aa2 WatchSource:0}: Error finding container 969a996d06760d3e79745e3324a5b356335d50dacd9ef3ba6d0aa8026a155aa2: Status 404 returned error can't find the container with id 969a996d06760d3e79745e3324a5b356335d50dacd9ef3ba6d0aa8026a155aa2
Jan 27 20:23:55 crc kubenswrapper[4793]: I0127 20:23:55.318629 4793 generic.go:334] "Generic (PLEG): container finished" podID="262c27f3-d284-4ae0-ac78-6821ec08221b" containerID="7fe75d0db99ba9a3eab8da1c1b62fc9dc40ec04ac5ab3ba63d4107e9e6cb0312" exitCode=0
Jan 27 20:23:55 crc kubenswrapper[4793]: I0127 20:23:55.318703 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c56b79fc-tv48b" event={"ID":"262c27f3-d284-4ae0-ac78-6821ec08221b","Type":"ContainerDied","Data":"7fe75d0db99ba9a3eab8da1c1b62fc9dc40ec04ac5ab3ba63d4107e9e6cb0312"}
Jan 27 20:23:55 crc kubenswrapper[4793]: I0127 20:23:55.318733 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c56b79fc-tv48b" event={"ID":"262c27f3-d284-4ae0-ac78-6821ec08221b","Type":"ContainerStarted","Data":"dd9a8b392281f75e3c6ba4d545b1aab0e2fd544c544f83aafd14d304fa09f2fc"}
Jan 27 20:23:55 crc kubenswrapper[4793]: I0127 20:23:55.320946 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b" event={"ID":"2f32f180-5e5c-4b65-a124-684e2620b221","Type":"ContainerStarted","Data":"969a996d06760d3e79745e3324a5b356335d50dacd9ef3ba6d0aa8026a155aa2"}
Jan 27 20:23:55 crc kubenswrapper[4793]: I0127 20:23:55.322231 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77dcc586cc-dt95k" event={"ID":"45bc2ffa-4a80-415c-abc3-3aa3d336c4fc","Type":"ContainerDied","Data":"eca29e1d0be096d712e659d061a625f6bcc773f97d9b75021fc42de2414e8443"}
Jan 27 20:23:55 crc kubenswrapper[4793]: I0127 20:23:55.322285 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77dcc586cc-dt95k"
Jan 27 20:23:55 crc kubenswrapper[4793]: I0127 20:23:55.332121 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7899f7d95c-t6js7"
Jan 27 20:23:55 crc kubenswrapper[4793]: I0127 20:23:55.332638 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-rrm85" event={"ID":"e8926f6a-2dd1-4b4d-912b-8f11e9c51832","Type":"ContainerStarted","Data":"41d8d1171977461c5f2341bc80179c70766376d6076e677737a3206cca8303c2"}
Jan 27 20:23:55 crc kubenswrapper[4793]: I0127 20:23:55.399296 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77dcc586cc-dt95k"]
Jan 27 20:23:55 crc kubenswrapper[4793]: I0127 20:23:55.408857 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77dcc586cc-dt95k"]
Jan 27 20:23:55 crc kubenswrapper[4793]: I0127 20:23:55.417316 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-rrm85" podStartSLOduration=10.693285156 podStartE2EDuration="42.417302367s" podCreationTimestamp="2026-01-27 20:23:13 +0000 UTC" firstStartedPulling="2026-01-27 20:23:18.910136677 +0000 UTC m=+1224.300389833" lastFinishedPulling="2026-01-27 20:23:50.634153888 +0000 UTC m=+1256.024407044" observedRunningTime="2026-01-27 20:23:55.415971004 +0000 UTC m=+1260.806224150" watchObservedRunningTime="2026-01-27 20:23:55.417302367 +0000 UTC m=+1260.807555513"
Jan 27 20:23:55 crc kubenswrapper[4793]: I0127 20:23:55.476624 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7899f7d95c-t6js7"]
Jan 27 20:23:55 crc kubenswrapper[4793]: I0127 20:23:55.490768 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7899f7d95c-t6js7"]
Jan 27 20:23:55 crc kubenswrapper[4793]: I0127 20:23:55.814399 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45bc2ffa-4a80-415c-abc3-3aa3d336c4fc" path="/var/lib/kubelet/pods/45bc2ffa-4a80-415c-abc3-3aa3d336c4fc/volumes"
Jan 27 20:23:55 crc kubenswrapper[4793]: I0127 20:23:55.815779 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9070a599-5105-4e4b-85df-dde3b1885bcb" path="/var/lib/kubelet/pods/9070a599-5105-4e4b-85df-dde3b1885bcb/volumes"
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.346536 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c56b79fc-tv48b" event={"ID":"262c27f3-d284-4ae0-ac78-6821ec08221b","Type":"ContainerStarted","Data":"ea204ad89e328d5c7f209730e76b99e2fca755024091d38c59912c3e028ebddf"}
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.346893 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-78c56b79fc-tv48b"
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.348583 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"da5aef81-265e-457b-bd86-b770db112298","Type":"ContainerStarted","Data":"747bb1bedebb45551fd455fe1fcb258e320e13f1a2d5b0b573834b413ee8683f"}
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.349029 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0"
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.352488 4793 generic.go:334] "Generic (PLEG): container finished" podID="2f32f180-5e5c-4b65-a124-684e2620b221" containerID="6d45b8d849ef0856985497e40bc7eeda14f733fd40ed3941dfe84a98a33cea46" exitCode=0
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.352704 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b" event={"ID":"2f32f180-5e5c-4b65-a124-684e2620b221","Type":"ContainerDied","Data":"6d45b8d849ef0856985497e40bc7eeda14f733fd40ed3941dfe84a98a33cea46"}
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.353575 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-rrm85"
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.353644 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-rrm85"
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.489921 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-78c56b79fc-tv48b" podStartSLOduration=3.346134828 podStartE2EDuration="3.48990346s" podCreationTimestamp="2026-01-27 20:23:53 +0000 UTC" firstStartedPulling="2026-01-27 20:23:54.785388542 +0000 UTC m=+1260.175641698" lastFinishedPulling="2026-01-27 20:23:54.929157164 +0000 UTC m=+1260.319410330" observedRunningTime="2026-01-27 20:23:56.370820484 +0000 UTC m=+1261.761073650" watchObservedRunningTime="2026-01-27 20:23:56.48990346 +0000 UTC m=+1261.880156616"
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.521205 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0"
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.523693 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0"
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.530988 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=4.293539082 podStartE2EDuration="49.53096615s" podCreationTimestamp="2026-01-27 20:23:07 +0000 UTC" firstStartedPulling="2026-01-27 20:23:10.661514593 +0000 UTC m=+1216.051767749" lastFinishedPulling="2026-01-27 20:23:55.898941661 +0000 UTC m=+1261.289194817" observedRunningTime="2026-01-27 20:23:56.505335849 +0000 UTC m=+1261.895589025" watchObservedRunningTime="2026-01-27 20:23:56.53096615 +0000 UTC m=+1261.921219306"
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.934755 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.936030 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.938701 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.938972 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-8v6x7"
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.939223 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.944165 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Jan 27 20:23:56 crc kubenswrapper[4793]: I0127 20:23:56.963859 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.103220 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8v5xc\" (UniqueName: \"kubernetes.io/projected/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-kube-api-access-8v5xc\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.103278 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.103332 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-config\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.103381 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.103428 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.103473 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-scripts\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.103506 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.204414 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.204487 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.204515 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-scripts\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.204534 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.204630 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8v5xc\" (UniqueName: \"kubernetes.io/projected/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-kube-api-access-8v5xc\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.204664 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.204704 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-config\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.218964 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.224543 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.225791 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-scripts\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.226083 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.226674 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-config\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.228139 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.232475 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8v5xc\" (UniqueName: \"kubernetes.io/projected/08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda-kube-api-access-8v5xc\") pod \"ovn-northd-0\" (UID: \"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda\") " pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.285893 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.367101 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b" event={"ID":"2f32f180-5e5c-4b65-a124-684e2620b221","Type":"ContainerStarted","Data":"6f2f32a0329e95e7cb4c1b463f131c5c692536c3ada7b33c5ddf928afdf46911"}
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.368484 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.370683 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7","Type":"ContainerStarted","Data":"1769161e8634efcc339439e61f08418e2f508bc1000ed8a23a5128fa680a685d"}
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.373664 4793 generic.go:334] "Generic (PLEG): container finished" podID="64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974" containerID="79c15fb06d8895bdca3f4d7abeab791e7d2b3ae90671f9dbdabae39aa70f1163" exitCode=0
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.374286 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974","Type":"ContainerDied","Data":"79c15fb06d8895bdca3f4d7abeab791e7d2b3ae90671f9dbdabae39aa70f1163"}
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.433288 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b" podStartSLOduration=3.433269058 podStartE2EDuration="3.433269058s" podCreationTimestamp="2026-01-27 20:23:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:23:57.407230038 +0000 UTC m=+1262.797483204" watchObservedRunningTime="2026-01-27 20:23:57.433269058 +0000 UTC m=+1262.823522214"
Jan 27 20:23:57 crc kubenswrapper[4793]: I0127 20:23:57.741897 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Jan 27 20:23:57 crc kubenswrapper[4793]: W0127 20:23:57.747116 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod08f0f5f2_6088_4d9e_a8fb_a5b87c64ceda.slice/crio-45f1b78906ec297bde163484b729c433c08bb25154ddfdac7a2a10c8559111fb WatchSource:0}: Error finding container 45f1b78906ec297bde163484b729c433c08bb25154ddfdac7a2a10c8559111fb: Status 404 returned error can't find the container with id 45f1b78906ec297bde163484b729c433c08bb25154ddfdac7a2a10c8559111fb
Jan 27 20:23:58 crc kubenswrapper[4793]: I0127 20:23:58.381951 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda","Type":"ContainerStarted","Data":"45f1b78906ec297bde163484b729c433c08bb25154ddfdac7a2a10c8559111fb"}
Jan 27 20:23:58 crc kubenswrapper[4793]: I0127 20:23:58.384085 4793 generic.go:334] "Generic (PLEG): container finished" podID="cd405c61-4515-4524-8b4c-c30fcc225b3b" containerID="c019218c7ff62eec93e4e65050f60e4e0fd4ff1e8d3c72db1bc218646478bf5d" exitCode=0
Jan 27 20:23:58 crc kubenswrapper[4793]: I0127 20:23:58.384149 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"cd405c61-4515-4524-8b4c-c30fcc225b3b","Type":"ContainerDied","Data":"c019218c7ff62eec93e4e65050f60e4e0fd4ff1e8d3c72db1bc218646478bf5d"}
Jan 27 20:23:58 crc kubenswrapper[4793]: I0127 20:23:58.389273 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974","Type":"ContainerStarted","Data":"d03d14c21136d1f0f19e1ad5ef8a5032048e660876471ca61bd79e219f73918a"}
Jan 27 20:23:58 crc kubenswrapper[4793]: I0127 20:23:58.447374 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=16.319127315 podStartE2EDuration="54.447349313s" podCreationTimestamp="2026-01-27 20:23:04 +0000 UTC" firstStartedPulling="2026-01-27 20:23:10.203726106 +0000 UTC m=+1215.593979252" lastFinishedPulling="2026-01-27 20:23:48.331948094 +0000 UTC m=+1253.722201250" observedRunningTime="2026-01-27 20:23:58.43986508 +0000 UTC m=+1263.830118246" watchObservedRunningTime="2026-01-27 20:23:58.447349313 +0000 UTC m=+1263.837602459"
Jan 27 20:23:59 crc kubenswrapper[4793]: I0127 20:23:59.397812 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"cd405c61-4515-4524-8b4c-c30fcc225b3b","Type":"ContainerStarted","Data":"0157e53c27c3ca3bb4598b27c1d4a21607f33e1c8ce049788fda7b24f8c3a811"}
Jan 27 20:23:59 crc kubenswrapper[4793]: I0127 20:23:59.401494 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda","Type":"ContainerStarted","Data":"3baef56c8f992545ae1bb707e8fe3814e4eb80fb26d6cc4f47582ed2b093b728"}
Jan 27 20:23:59 crc kubenswrapper[4793]: I0127 20:23:59.401559 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda","Type":"ContainerStarted","Data":"0e6a881a76278860da0b5990926632ae1660ce31294c92411f10bdc8d95d7cce"}
Jan 27 20:23:59 crc kubenswrapper[4793]: I0127 20:23:59.421606 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=14.855908543 podStartE2EDuration="52.4215579s" podCreationTimestamp="2026-01-27 20:23:07 +0000 UTC" firstStartedPulling="2026-01-27 20:23:10.8612129 +0000 UTC m=+1216.251466056" lastFinishedPulling="2026-01-27 20:23:48.426862257 +0000 UTC m=+1253.817115413" observedRunningTime="2026-01-27 20:23:59.420909864 +0000 UTC m=+1264.811163020" watchObservedRunningTime="2026-01-27 20:23:59.4215579 +0000 UTC m=+1264.811811096"
Jan 27 20:23:59 crc kubenswrapper[4793]: I0127 20:23:59.425372 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Jan 27 20:23:59 crc kubenswrapper[4793]: I0127 20:23:59.425428 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Jan 27 20:23:59 crc kubenswrapper[4793]: I0127 20:23:59.461030 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.972875107 podStartE2EDuration="3.461007779s" podCreationTimestamp="2026-01-27 20:23:56 +0000 UTC" firstStartedPulling="2026-01-27 20:23:57.749241522 +0000 UTC m=+1263.139494678" lastFinishedPulling="2026-01-27 20:23:58.237374194 +0000 UTC m=+1263.627627350" observedRunningTime="2026-01-27 20:23:59.441500489 +0000 UTC m=+1264.831753665" watchObservedRunningTime="2026-01-27 20:23:59.461007779 +0000 UTC m=+1264.851260945"
Jan 27 20:24:00 crc kubenswrapper[4793]: I0127 20:24:00.408581 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"be1afc99-1852-4e3b-a2e7-e9beab138334","Type":"ContainerStarted","Data":"c6abbef47f67e638090a5c00620726d933ca1e66b204ba6c8fb3d01433ffb8aa"}
Jan 27 20:24:00 crc kubenswrapper[4793]: I0127 20:24:00.408954 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Jan 27 20:24:01 crc kubenswrapper[4793]: I0127 20:24:01.419090 4793 generic.go:334] "Generic (PLEG): container finished" podID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerID="2f5bc73680dbd5086bded0a32b53b08a40c7302f3f3ec85df280dc0f533103f9" exitCode=0
Jan 27 20:24:01 crc kubenswrapper[4793]: I0127 20:24:01.419200 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a290e10d-fe47-4a6c-bb5c-e5306fefd090","Type":"ContainerDied","Data":"2f5bc73680dbd5086bded0a32b53b08a40c7302f3f3ec85df280dc0f533103f9"}
Jan 27 20:24:03 crc kubenswrapper[4793]: I0127 20:24:03.041863 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0"
Jan 27 20:24:04 crc kubenswrapper[4793]: I0127 20:24:04.192708 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-78c56b79fc-tv48b"
Jan 27 20:24:04 crc kubenswrapper[4793]: I0127 20:24:04.653755 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b"
Jan 27 20:24:04 crc kubenswrapper[4793]: I0127 20:24:04.705258 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78c56b79fc-tv48b"]
Jan 27 20:24:04 crc kubenswrapper[4793]: I0127 20:24:04.706317 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-78c56b79fc-tv48b" podUID="262c27f3-d284-4ae0-ac78-6821ec08221b" containerName="dnsmasq-dns" containerID="cri-o://ea204ad89e328d5c7f209730e76b99e2fca755024091d38c59912c3e028ebddf" gracePeriod=10
Jan 27 20:24:05 crc kubenswrapper[4793]: I0127 20:24:05.452611 4793 generic.go:334] "Generic (PLEG): container finished" podID="262c27f3-d284-4ae0-ac78-6821ec08221b" containerID="ea204ad89e328d5c7f209730e76b99e2fca755024091d38c59912c3e028ebddf" exitCode=0
Jan 27 20:24:05 crc kubenswrapper[4793]: I0127 20:24:05.452996 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c56b79fc-tv48b" event={"ID":"262c27f3-d284-4ae0-ac78-6821ec08221b","Type":"ContainerDied","Data":"ea204ad89e328d5c7f209730e76b99e2fca755024091d38c59912c3e028ebddf"}
Jan 27 20:24:05 crc kubenswrapper[4793]: I0127 20:24:05.628101 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Jan 27 20:24:05 crc kubenswrapper[4793]: I0127 20:24:05.747179 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Jan 27 20:24:06 crc kubenswrapper[4793]: I0127 20:24:06.907487 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-9mgt5"]
Jan 27 20:24:06 crc kubenswrapper[4793]: I0127 20:24:06.909059 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-9mgt5"
Jan 27 20:24:06 crc kubenswrapper[4793]: I0127 20:24:06.911027 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret"
Jan 27 20:24:06 crc kubenswrapper[4793]: I0127 20:24:06.914641 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-9mgt5"]
Jan 27 20:24:06 crc kubenswrapper[4793]: I0127 20:24:06.984304 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4536e6b5-2081-41ff-9824-53318cd7d7f7-operator-scripts\") pod \"root-account-create-update-9mgt5\" (UID: \"4536e6b5-2081-41ff-9824-53318cd7d7f7\") " pod="openstack/root-account-create-update-9mgt5"
Jan 27 20:24:06 crc kubenswrapper[4793]: I0127 20:24:06.984675 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgwbh\" (UniqueName: \"kubernetes.io/projected/4536e6b5-2081-41ff-9824-53318cd7d7f7-kube-api-access-fgwbh\") pod \"root-account-create-update-9mgt5\" (UID: \"4536e6b5-2081-41ff-9824-53318cd7d7f7\") " pod="openstack/root-account-create-update-9mgt5"
Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.030825 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.030883 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.086284 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4536e6b5-2081-41ff-9824-53318cd7d7f7-operator-scripts\") pod \"root-account-create-update-9mgt5\" (UID: \"4536e6b5-2081-41ff-9824-53318cd7d7f7\") " pod="openstack/root-account-create-update-9mgt5"
Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.086357 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgwbh\" (UniqueName: \"kubernetes.io/projected/4536e6b5-2081-41ff-9824-53318cd7d7f7-kube-api-access-fgwbh\") pod \"root-account-create-update-9mgt5\" (UID: \"4536e6b5-2081-41ff-9824-53318cd7d7f7\") " pod="openstack/root-account-create-update-9mgt5"
Jan 27 20:24:07 crc kubenswrapper[4793]:
I0127 20:24:07.087449 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4536e6b5-2081-41ff-9824-53318cd7d7f7-operator-scripts\") pod \"root-account-create-update-9mgt5\" (UID: \"4536e6b5-2081-41ff-9824-53318cd7d7f7\") " pod="openstack/root-account-create-update-9mgt5" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.112427 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgwbh\" (UniqueName: \"kubernetes.io/projected/4536e6b5-2081-41ff-9824-53318cd7d7f7-kube-api-access-fgwbh\") pod \"root-account-create-update-9mgt5\" (UID: \"4536e6b5-2081-41ff-9824-53318cd7d7f7\") " pod="openstack/root-account-create-update-9mgt5" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.134334 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.227260 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-9mgt5" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.244639 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78c56b79fc-tv48b" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.289172 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-ovsdbserver-nb\") pod \"262c27f3-d284-4ae0-ac78-6821ec08221b\" (UID: \"262c27f3-d284-4ae0-ac78-6821ec08221b\") " Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.289258 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-config\") pod \"262c27f3-d284-4ae0-ac78-6821ec08221b\" (UID: \"262c27f3-d284-4ae0-ac78-6821ec08221b\") " Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.289417 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cv5hd\" (UniqueName: \"kubernetes.io/projected/262c27f3-d284-4ae0-ac78-6821ec08221b-kube-api-access-cv5hd\") pod \"262c27f3-d284-4ae0-ac78-6821ec08221b\" (UID: \"262c27f3-d284-4ae0-ac78-6821ec08221b\") " Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.289449 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-dns-svc\") pod \"262c27f3-d284-4ae0-ac78-6821ec08221b\" (UID: \"262c27f3-d284-4ae0-ac78-6821ec08221b\") " Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.294380 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/262c27f3-d284-4ae0-ac78-6821ec08221b-kube-api-access-cv5hd" (OuterVolumeSpecName: "kube-api-access-cv5hd") pod "262c27f3-d284-4ae0-ac78-6821ec08221b" (UID: "262c27f3-d284-4ae0-ac78-6821ec08221b"). InnerVolumeSpecName "kube-api-access-cv5hd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.338705 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-config" (OuterVolumeSpecName: "config") pod "262c27f3-d284-4ae0-ac78-6821ec08221b" (UID: "262c27f3-d284-4ae0-ac78-6821ec08221b"). InnerVolumeSpecName "config". 
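The UnmountVolume.TearDown and "Volume detached" entries in this stretch are the kubelet volume manager's reconciler at work: after the SyncLoop DELETE for dnsmasq-dns-78c56b79fc-tv48b, the pod's volumes drop out of the desired state, so everything still present in the actual (mounted) state gets torn down and then reported detached. A minimal sketch of that desired-versus-actual loop, using hypothetical simplified types rather than the real kubelet structures:

    // Illustrative model of the behaviour visible in the
    // reconciler_common.go / operation_generator.go lines above;
    // the types and functions are simplified stand-ins, not kubelet APIs.
    package main

    import "fmt"

    type volume struct{ name, plugin string }

    func reconcile(desired, actual map[string]volume) {
        for name, v := range actual {
            if _, ok := desired[name]; !ok {
                fmt.Printf("UnmountVolume started for volume %q\n", name)
                // A real TearDown unmounts here; on success the volume
                // leaves the actual state, yielding "Volume detached".
                delete(actual, name)
                fmt.Printf("Volume detached for volume %q (plugin %s)\n", name, v.plugin)
            }
        }
    }

    func main() {
        // Volumes still mounted for the deleted pod; its desired set is now empty.
        actual := map[string]volume{
            "config":                {"config", "kubernetes.io/configmap"},
            "dns-svc":               {"dns-svc", "kubernetes.io/configmap"},
            "ovsdbserver-nb":        {"ovsdbserver-nb", "kubernetes.io/configmap"},
            "kube-api-access-cv5hd": {"kube-api-access-cv5hd", "kubernetes.io/projected"},
        }
        reconcile(map[string]volume{}, actual)
    }

Go's random map iteration order mirrors the way the four volumes above detach in no particular order.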
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.351199 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "262c27f3-d284-4ae0-ac78-6821ec08221b" (UID: "262c27f3-d284-4ae0-ac78-6821ec08221b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.360278 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "262c27f3-d284-4ae0-ac78-6821ec08221b" (UID: "262c27f3-d284-4ae0-ac78-6821ec08221b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.393712 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cv5hd\" (UniqueName: \"kubernetes.io/projected/262c27f3-d284-4ae0-ac78-6821ec08221b-kube-api-access-cv5hd\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.393991 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.394005 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.394016 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/262c27f3-d284-4ae0-ac78-6821ec08221b-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.493385 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78c56b79fc-tv48b" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.505723 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c56b79fc-tv48b" event={"ID":"262c27f3-d284-4ae0-ac78-6821ec08221b","Type":"ContainerDied","Data":"dd9a8b392281f75e3c6ba4d545b1aab0e2fd544c544f83aafd14d304fa09f2fc"} Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.505800 4793 scope.go:117] "RemoveContainer" containerID="ea204ad89e328d5c7f209730e76b99e2fca755024091d38c59912c3e028ebddf" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.643791 4793 scope.go:117] "RemoveContainer" containerID="7fe75d0db99ba9a3eab8da1c1b62fc9dc40ec04ac5ab3ba63d4107e9e6cb0312" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.675809 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78c56b79fc-tv48b"] Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.683521 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78c56b79fc-tv48b"] Jan 27 20:24:07 crc kubenswrapper[4793]: E0127 20:24:07.694806 4793 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod262c27f3_d284_4ae0_ac78_6821ec08221b.slice\": RecentStats: unable to find data in memory cache]" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.814471 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="262c27f3-d284-4ae0-ac78-6821ec08221b" path="/var/lib/kubelet/pods/262c27f3-d284-4ae0-ac78-6821ec08221b/volumes" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.816023 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 27 20:24:07 crc kubenswrapper[4793]: I0127 20:24:07.909924 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-9mgt5"] Jan 27 20:24:08 crc kubenswrapper[4793]: I0127 20:24:08.502033 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"58b757ab-b790-4b71-888f-49d52dc5e80d","Type":"ContainerStarted","Data":"83b261d127c85db97c4dc8d55e89046cbb9ea938c774c6f8eb8579770e1768eb"} Jan 27 20:24:08 crc kubenswrapper[4793]: I0127 20:24:08.502243 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 27 20:24:08 crc kubenswrapper[4793]: I0127 20:24:08.506253 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a290e10d-fe47-4a6c-bb5c-e5306fefd090","Type":"ContainerStarted","Data":"f28c44283d04df364885d9029074b7e6b250419350041bf36500e618101a284e"} Jan 27 20:24:08 crc kubenswrapper[4793]: I0127 20:24:08.507723 4793 generic.go:334] "Generic (PLEG): container finished" podID="4536e6b5-2081-41ff-9824-53318cd7d7f7" containerID="d254585d3a1638b741163ab1ed4c8c7be458c83042ef63e1ed060e5d1852d0dd" exitCode=0 Jan 27 20:24:08 crc kubenswrapper[4793]: I0127 20:24:08.507784 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-9mgt5" event={"ID":"4536e6b5-2081-41ff-9824-53318cd7d7f7","Type":"ContainerDied","Data":"d254585d3a1638b741163ab1ed4c8c7be458c83042ef63e1ed060e5d1852d0dd"} Jan 27 20:24:08 crc kubenswrapper[4793]: I0127 20:24:08.507832 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-9mgt5" 
event={"ID":"4536e6b5-2081-41ff-9824-53318cd7d7f7","Type":"ContainerStarted","Data":"0bc4bed99c9f4d0ea9d1d750fdd3cf39110848fd78877dbc49ad182add5af04c"} Jan 27 20:24:08 crc kubenswrapper[4793]: I0127 20:24:08.525338 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=3.296480202 podStartE2EDuration="57.525321455s" podCreationTimestamp="2026-01-27 20:23:11 +0000 UTC" firstStartedPulling="2026-01-27 20:23:13.085624162 +0000 UTC m=+1218.475877318" lastFinishedPulling="2026-01-27 20:24:07.314465415 +0000 UTC m=+1272.704718571" observedRunningTime="2026-01-27 20:24:08.520103216 +0000 UTC m=+1273.910356382" watchObservedRunningTime="2026-01-27 20:24:08.525321455 +0000 UTC m=+1273.915574611" Jan 27 20:24:10 crc kubenswrapper[4793]: I0127 20:24:10.303111 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-9mgt5" Jan 27 20:24:10 crc kubenswrapper[4793]: I0127 20:24:10.349914 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgwbh\" (UniqueName: \"kubernetes.io/projected/4536e6b5-2081-41ff-9824-53318cd7d7f7-kube-api-access-fgwbh\") pod \"4536e6b5-2081-41ff-9824-53318cd7d7f7\" (UID: \"4536e6b5-2081-41ff-9824-53318cd7d7f7\") " Jan 27 20:24:10 crc kubenswrapper[4793]: I0127 20:24:10.350066 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4536e6b5-2081-41ff-9824-53318cd7d7f7-operator-scripts\") pod \"4536e6b5-2081-41ff-9824-53318cd7d7f7\" (UID: \"4536e6b5-2081-41ff-9824-53318cd7d7f7\") " Jan 27 20:24:10 crc kubenswrapper[4793]: I0127 20:24:10.350845 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4536e6b5-2081-41ff-9824-53318cd7d7f7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4536e6b5-2081-41ff-9824-53318cd7d7f7" (UID: "4536e6b5-2081-41ff-9824-53318cd7d7f7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:10 crc kubenswrapper[4793]: I0127 20:24:10.356526 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4536e6b5-2081-41ff-9824-53318cd7d7f7-kube-api-access-fgwbh" (OuterVolumeSpecName: "kube-api-access-fgwbh") pod "4536e6b5-2081-41ff-9824-53318cd7d7f7" (UID: "4536e6b5-2081-41ff-9824-53318cd7d7f7"). InnerVolumeSpecName "kube-api-access-fgwbh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:24:10 crc kubenswrapper[4793]: I0127 20:24:10.452279 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4536e6b5-2081-41ff-9824-53318cd7d7f7-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:10 crc kubenswrapper[4793]: I0127 20:24:10.452318 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgwbh\" (UniqueName: \"kubernetes.io/projected/4536e6b5-2081-41ff-9824-53318cd7d7f7-kube-api-access-fgwbh\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:10 crc kubenswrapper[4793]: I0127 20:24:10.528795 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-9mgt5" event={"ID":"4536e6b5-2081-41ff-9824-53318cd7d7f7","Type":"ContainerDied","Data":"0bc4bed99c9f4d0ea9d1d750fdd3cf39110848fd78877dbc49ad182add5af04c"} Jan 27 20:24:10 crc kubenswrapper[4793]: I0127 20:24:10.528845 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0bc4bed99c9f4d0ea9d1d750fdd3cf39110848fd78877dbc49ad182add5af04c" Jan 27 20:24:10 crc kubenswrapper[4793]: I0127 20:24:10.528878 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-9mgt5" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.661845 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a290e10d-fe47-4a6c-bb5c-e5306fefd090","Type":"ContainerStarted","Data":"d3bd107fee0835adeafd44074ea03d8907cd9432884fed26cfdca79e2cf69aee"} Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.696060 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-create-kth5t"] Jan 27 20:24:11 crc kubenswrapper[4793]: E0127 20:24:11.696432 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4536e6b5-2081-41ff-9824-53318cd7d7f7" containerName="mariadb-account-create-update" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.696449 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4536e6b5-2081-41ff-9824-53318cd7d7f7" containerName="mariadb-account-create-update" Jan 27 20:24:11 crc kubenswrapper[4793]: E0127 20:24:11.696475 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="262c27f3-d284-4ae0-ac78-6821ec08221b" containerName="dnsmasq-dns" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.696481 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="262c27f3-d284-4ae0-ac78-6821ec08221b" containerName="dnsmasq-dns" Jan 27 20:24:11 crc kubenswrapper[4793]: E0127 20:24:11.696493 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="262c27f3-d284-4ae0-ac78-6821ec08221b" containerName="init" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.696499 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="262c27f3-d284-4ae0-ac78-6821ec08221b" containerName="init" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.696673 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4536e6b5-2081-41ff-9824-53318cd7d7f7" containerName="mariadb-account-create-update" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.696690 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="262c27f3-d284-4ae0-ac78-6821ec08221b" containerName="dnsmasq-dns" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.697247 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-create-kth5t" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.726574 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-kth5t"] Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.765448 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-74bdb45575-5dqh9"] Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.767367 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.811876 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74bdb45575-5dqh9"] Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.892153 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-3d85-account-create-update-qqk7s"] Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.909830 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-3d85-account-create-update-qqk7s" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.922213 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-db-secret" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.923657 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzg2l\" (UniqueName: \"kubernetes.io/projected/1959c2b9-5a70-4503-aef6-52dcfe28dd73-kube-api-access-bzg2l\") pod \"dnsmasq-dns-74bdb45575-5dqh9\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.923755 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d33de24-4f5c-4cff-8da3-7848753edd2a-operator-scripts\") pod \"watcher-db-create-kth5t\" (UID: \"9d33de24-4f5c-4cff-8da3-7848753edd2a\") " pod="openstack/watcher-db-create-kth5t" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.923819 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-dns-svc\") pod \"dnsmasq-dns-74bdb45575-5dqh9\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.924126 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-config\") pod \"dnsmasq-dns-74bdb45575-5dqh9\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.924190 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-ovsdbserver-sb\") pod \"dnsmasq-dns-74bdb45575-5dqh9\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.924337 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-ovsdbserver-nb\") pod \"dnsmasq-dns-74bdb45575-5dqh9\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.924366 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fb52r\" (UniqueName: \"kubernetes.io/projected/9d33de24-4f5c-4cff-8da3-7848753edd2a-kube-api-access-fb52r\") pod \"watcher-db-create-kth5t\" (UID: \"9d33de24-4f5c-4cff-8da3-7848753edd2a\") " pod="openstack/watcher-db-create-kth5t" Jan 27 20:24:11 crc kubenswrapper[4793]: I0127 20:24:11.936033 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-3d85-account-create-update-qqk7s"] Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.026427 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cd9ws\" (UniqueName: \"kubernetes.io/projected/b0ab0f0f-0759-461c-b18e-ca19b1a627e0-kube-api-access-cd9ws\") pod \"watcher-3d85-account-create-update-qqk7s\" (UID: \"b0ab0f0f-0759-461c-b18e-ca19b1a627e0\") " pod="openstack/watcher-3d85-account-create-update-qqk7s" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.027050 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-ovsdbserver-nb\") pod \"dnsmasq-dns-74bdb45575-5dqh9\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.027205 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fb52r\" (UniqueName: \"kubernetes.io/projected/9d33de24-4f5c-4cff-8da3-7848753edd2a-kube-api-access-fb52r\") pod \"watcher-db-create-kth5t\" (UID: \"9d33de24-4f5c-4cff-8da3-7848753edd2a\") " pod="openstack/watcher-db-create-kth5t" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.027372 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzg2l\" (UniqueName: \"kubernetes.io/projected/1959c2b9-5a70-4503-aef6-52dcfe28dd73-kube-api-access-bzg2l\") pod \"dnsmasq-dns-74bdb45575-5dqh9\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.029192 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d33de24-4f5c-4cff-8da3-7848753edd2a-operator-scripts\") pod \"watcher-db-create-kth5t\" (UID: \"9d33de24-4f5c-4cff-8da3-7848753edd2a\") " pod="openstack/watcher-db-create-kth5t" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.029380 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-dns-svc\") pod \"dnsmasq-dns-74bdb45575-5dqh9\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.029426 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-config\") pod \"dnsmasq-dns-74bdb45575-5dqh9\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " 
pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.029577 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-ovsdbserver-sb\") pod \"dnsmasq-dns-74bdb45575-5dqh9\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.029687 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0ab0f0f-0759-461c-b18e-ca19b1a627e0-operator-scripts\") pod \"watcher-3d85-account-create-update-qqk7s\" (UID: \"b0ab0f0f-0759-461c-b18e-ca19b1a627e0\") " pod="openstack/watcher-3d85-account-create-update-qqk7s" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.028815 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-ovsdbserver-nb\") pod \"dnsmasq-dns-74bdb45575-5dqh9\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.030992 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d33de24-4f5c-4cff-8da3-7848753edd2a-operator-scripts\") pod \"watcher-db-create-kth5t\" (UID: \"9d33de24-4f5c-4cff-8da3-7848753edd2a\") " pod="openstack/watcher-db-create-kth5t" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.031752 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-dns-svc\") pod \"dnsmasq-dns-74bdb45575-5dqh9\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.032401 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-config\") pod \"dnsmasq-dns-74bdb45575-5dqh9\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.037405 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-ovsdbserver-sb\") pod \"dnsmasq-dns-74bdb45575-5dqh9\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.051723 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fb52r\" (UniqueName: \"kubernetes.io/projected/9d33de24-4f5c-4cff-8da3-7848753edd2a-kube-api-access-fb52r\") pod \"watcher-db-create-kth5t\" (UID: \"9d33de24-4f5c-4cff-8da3-7848753edd2a\") " pod="openstack/watcher-db-create-kth5t" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.062799 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzg2l\" (UniqueName: \"kubernetes.io/projected/1959c2b9-5a70-4503-aef6-52dcfe28dd73-kube-api-access-bzg2l\") pod \"dnsmasq-dns-74bdb45575-5dqh9\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:12 crc 
kubenswrapper[4793]: I0127 20:24:12.116635 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.131199 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0ab0f0f-0759-461c-b18e-ca19b1a627e0-operator-scripts\") pod \"watcher-3d85-account-create-update-qqk7s\" (UID: \"b0ab0f0f-0759-461c-b18e-ca19b1a627e0\") " pod="openstack/watcher-3d85-account-create-update-qqk7s" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.131305 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cd9ws\" (UniqueName: \"kubernetes.io/projected/b0ab0f0f-0759-461c-b18e-ca19b1a627e0-kube-api-access-cd9ws\") pod \"watcher-3d85-account-create-update-qqk7s\" (UID: \"b0ab0f0f-0759-461c-b18e-ca19b1a627e0\") " pod="openstack/watcher-3d85-account-create-update-qqk7s" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.133274 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0ab0f0f-0759-461c-b18e-ca19b1a627e0-operator-scripts\") pod \"watcher-3d85-account-create-update-qqk7s\" (UID: \"b0ab0f0f-0759-461c-b18e-ca19b1a627e0\") " pod="openstack/watcher-3d85-account-create-update-qqk7s" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.154156 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cd9ws\" (UniqueName: \"kubernetes.io/projected/b0ab0f0f-0759-461c-b18e-ca19b1a627e0-kube-api-access-cd9ws\") pod \"watcher-3d85-account-create-update-qqk7s\" (UID: \"b0ab0f0f-0759-461c-b18e-ca19b1a627e0\") " pod="openstack/watcher-3d85-account-create-update-qqk7s" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.259441 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-3d85-account-create-update-qqk7s" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.321575 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-create-kth5t" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.645001 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74bdb45575-5dqh9"] Jan 27 20:24:12 crc kubenswrapper[4793]: W0127 20:24:12.663355 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1959c2b9_5a70_4503_aef6_52dcfe28dd73.slice/crio-4e9dc7e08565d00a7b175ef0da39b42b160a0f2e9222c402f66f5d753fb7f7f5 WatchSource:0}: Error finding container 4e9dc7e08565d00a7b175ef0da39b42b160a0f2e9222c402f66f5d753fb7f7f5: Status 404 returned error can't find the container with id 4e9dc7e08565d00a7b175ef0da39b42b160a0f2e9222c402f66f5d753fb7f7f5 Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.679086 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" event={"ID":"1959c2b9-5a70-4503-aef6-52dcfe28dd73","Type":"ContainerStarted","Data":"4e9dc7e08565d00a7b175ef0da39b42b160a0f2e9222c402f66f5d753fb7f7f5"} Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.839770 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-3d85-account-create-update-qqk7s"] Jan 27 20:24:12 crc kubenswrapper[4793]: W0127 20:24:12.856819 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb0ab0f0f_0759_461c_b18e_ca19b1a627e0.slice/crio-ce15a4b8423d43be418cbb232b8d74d41bb96097bab483b4e9df738982e4ab5d WatchSource:0}: Error finding container ce15a4b8423d43be418cbb232b8d74d41bb96097bab483b4e9df738982e4ab5d: Status 404 returned error can't find the container with id ce15a4b8423d43be418cbb232b8d74d41bb96097bab483b4e9df738982e4ab5d Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.913100 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.948942 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.950857 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.952108 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.952370 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.954771 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-kmcq8" Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.965542 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-kth5t"] Jan 27 20:24:12 crc kubenswrapper[4793]: I0127 20:24:12.966527 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.077347 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.077431 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.077511 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.077531 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-cache\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.077579 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-lock\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.077608 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xc68\" (UniqueName: \"kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-kube-api-access-6xc68\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.180455 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift\") pod \"swift-storage-0\" (UID: 
\"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.180520 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-cache\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.180570 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-lock\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.180609 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xc68\" (UniqueName: \"kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-kube-api-access-6xc68\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: E0127 20:24:13.180666 4793 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 27 20:24:13 crc kubenswrapper[4793]: E0127 20:24:13.180690 4793 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 27 20:24:13 crc kubenswrapper[4793]: E0127 20:24:13.180748 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift podName:6a954bdd-89aa-4d5c-8034-5c8ed27e8652 nodeName:}" failed. No retries permitted until 2026-01-27 20:24:13.680726565 +0000 UTC m=+1279.070979721 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift") pod "swift-storage-0" (UID: "6a954bdd-89aa-4d5c-8034-5c8ed27e8652") : configmap "swift-ring-files" not found Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.180856 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.180990 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.181392 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-lock\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.181469 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.181495 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-cache\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.187941 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.209095 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xc68\" (UniqueName: \"kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-kube-api-access-6xc68\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.228192 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.683270 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:13 crc kubenswrapper[4793]: E0127 20:24:13.683522 4793 projected.go:288] Couldn't get configMap 
openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 27 20:24:13 crc kubenswrapper[4793]: E0127 20:24:13.683535 4793 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 27 20:24:13 crc kubenswrapper[4793]: E0127 20:24:13.683592 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift podName:6a954bdd-89aa-4d5c-8034-5c8ed27e8652 nodeName:}" failed. No retries permitted until 2026-01-27 20:24:14.68357744 +0000 UTC m=+1280.073830596 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift") pod "swift-storage-0" (UID: "6a954bdd-89aa-4d5c-8034-5c8ed27e8652") : configmap "swift-ring-files" not found Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.713848 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-2m2n7"] Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.715083 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.717413 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-kth5t" event={"ID":"9d33de24-4f5c-4cff-8da3-7848753edd2a","Type":"ContainerStarted","Data":"aeccdaccdb730b33679ec0e40e779a95e205ac66d2ad54403cf01fd399a72632"} Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.717884 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.719044 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.722679 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.723609 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-3d85-account-create-update-qqk7s" event={"ID":"b0ab0f0f-0759-461c-b18e-ca19b1a627e0","Type":"ContainerStarted","Data":"ce15a4b8423d43be418cbb232b8d74d41bb96097bab483b4e9df738982e4ab5d"} Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.724713 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-2m2n7"] Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.818738 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-9mgt5"] Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.822125 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-9mgt5"] Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.851036 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-kbw4s"] Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.853016 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-kbw4s" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.857166 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.875324 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-kbw4s"] Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.889268 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-swiftconf\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.889389 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpdvp\" (UniqueName: \"kubernetes.io/projected/f5ddc141-eae8-4a4c-b118-a79a9276cf33-kube-api-access-dpdvp\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.889540 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-dispersionconf\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.889673 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f5ddc141-eae8-4a4c-b118-a79a9276cf33-scripts\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.889715 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-combined-ca-bundle\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.889737 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f5ddc141-eae8-4a4c-b118-a79a9276cf33-ring-data-devices\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.889788 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f5ddc141-eae8-4a4c-b118-a79a9276cf33-etc-swift\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.991521 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-swiftconf\") pod \"swift-ring-rebalance-2m2n7\" (UID: 
\"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.991634 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpdvp\" (UniqueName: \"kubernetes.io/projected/f5ddc141-eae8-4a4c-b118-a79a9276cf33-kube-api-access-dpdvp\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.991704 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-dispersionconf\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.991762 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdrwt\" (UniqueName: \"kubernetes.io/projected/a267a0de-2ca0-4324-a99d-faf15e41e8ff-kube-api-access-wdrwt\") pod \"root-account-create-update-kbw4s\" (UID: \"a267a0de-2ca0-4324-a99d-faf15e41e8ff\") " pod="openstack/root-account-create-update-kbw4s" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.991811 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f5ddc141-eae8-4a4c-b118-a79a9276cf33-scripts\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.991850 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-combined-ca-bundle\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.991992 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f5ddc141-eae8-4a4c-b118-a79a9276cf33-ring-data-devices\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.992029 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a267a0de-2ca0-4324-a99d-faf15e41e8ff-operator-scripts\") pod \"root-account-create-update-kbw4s\" (UID: \"a267a0de-2ca0-4324-a99d-faf15e41e8ff\") " pod="openstack/root-account-create-update-kbw4s" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.992065 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f5ddc141-eae8-4a4c-b118-a79a9276cf33-etc-swift\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.992931 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f5ddc141-eae8-4a4c-b118-a79a9276cf33-etc-swift\") pod \"swift-ring-rebalance-2m2n7\" (UID: 
\"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.993309 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f5ddc141-eae8-4a4c-b118-a79a9276cf33-ring-data-devices\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.993319 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f5ddc141-eae8-4a4c-b118-a79a9276cf33-scripts\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.998172 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-dispersionconf\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:13 crc kubenswrapper[4793]: I0127 20:24:13.999709 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-combined-ca-bundle\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:14 crc kubenswrapper[4793]: I0127 20:24:14.000114 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-swiftconf\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:14 crc kubenswrapper[4793]: I0127 20:24:14.018082 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpdvp\" (UniqueName: \"kubernetes.io/projected/f5ddc141-eae8-4a4c-b118-a79a9276cf33-kube-api-access-dpdvp\") pod \"swift-ring-rebalance-2m2n7\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") " pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:14 crc kubenswrapper[4793]: I0127 20:24:14.034830 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:14 crc kubenswrapper[4793]: I0127 20:24:14.094673 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdrwt\" (UniqueName: \"kubernetes.io/projected/a267a0de-2ca0-4324-a99d-faf15e41e8ff-kube-api-access-wdrwt\") pod \"root-account-create-update-kbw4s\" (UID: \"a267a0de-2ca0-4324-a99d-faf15e41e8ff\") " pod="openstack/root-account-create-update-kbw4s" Jan 27 20:24:14 crc kubenswrapper[4793]: I0127 20:24:14.094963 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a267a0de-2ca0-4324-a99d-faf15e41e8ff-operator-scripts\") pod \"root-account-create-update-kbw4s\" (UID: \"a267a0de-2ca0-4324-a99d-faf15e41e8ff\") " pod="openstack/root-account-create-update-kbw4s" Jan 27 20:24:14 crc kubenswrapper[4793]: I0127 20:24:14.547055 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a267a0de-2ca0-4324-a99d-faf15e41e8ff-operator-scripts\") pod \"root-account-create-update-kbw4s\" (UID: \"a267a0de-2ca0-4324-a99d-faf15e41e8ff\") " pod="openstack/root-account-create-update-kbw4s" Jan 27 20:24:14 crc kubenswrapper[4793]: I0127 20:24:14.807730 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:14 crc kubenswrapper[4793]: E0127 20:24:14.808863 4793 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 27 20:24:14 crc kubenswrapper[4793]: E0127 20:24:14.810080 4793 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 27 20:24:14 crc kubenswrapper[4793]: E0127 20:24:14.810134 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift podName:6a954bdd-89aa-4d5c-8034-5c8ed27e8652 nodeName:}" failed. No retries permitted until 2026-01-27 20:24:16.810113569 +0000 UTC m=+1282.200366725 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift") pod "swift-storage-0" (UID: "6a954bdd-89aa-4d5c-8034-5c8ed27e8652") : configmap "swift-ring-files" not found Jan 27 20:24:14 crc kubenswrapper[4793]: I0127 20:24:14.818198 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdrwt\" (UniqueName: \"kubernetes.io/projected/a267a0de-2ca0-4324-a99d-faf15e41e8ff-kube-api-access-wdrwt\") pod \"root-account-create-update-kbw4s\" (UID: \"a267a0de-2ca0-4324-a99d-faf15e41e8ff\") " pod="openstack/root-account-create-update-kbw4s" Jan 27 20:24:15 crc kubenswrapper[4793]: I0127 20:24:15.084023 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-kbw4s" Jan 27 20:24:15 crc kubenswrapper[4793]: I0127 20:24:15.225837 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-2m2n7"] Jan 27 20:24:15 crc kubenswrapper[4793]: W0127 20:24:15.238843 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5ddc141_eae8_4a4c_b118_a79a9276cf33.slice/crio-397716be4920956a502ac8c6bd2971932d259f6024900cbc105aaa401ad0cc27 WatchSource:0}: Error finding container 397716be4920956a502ac8c6bd2971932d259f6024900cbc105aaa401ad0cc27: Status 404 returned error can't find the container with id 397716be4920956a502ac8c6bd2971932d259f6024900cbc105aaa401ad0cc27 Jan 27 20:24:15 crc kubenswrapper[4793]: I0127 20:24:15.677766 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-kbw4s"] Jan 27 20:24:15 crc kubenswrapper[4793]: I0127 20:24:15.812417 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4536e6b5-2081-41ff-9824-53318cd7d7f7" path="/var/lib/kubelet/pods/4536e6b5-2081-41ff-9824-53318cd7d7f7/volumes" Jan 27 20:24:15 crc kubenswrapper[4793]: I0127 20:24:15.819466 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kbw4s" event={"ID":"a267a0de-2ca0-4324-a99d-faf15e41e8ff","Type":"ContainerStarted","Data":"788fc80da87871a826b3bb07c0a2e97fe5c4e2584885c05b289f694881ff860c"} Jan 27 20:24:15 crc kubenswrapper[4793]: I0127 20:24:15.820922 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-2m2n7" event={"ID":"f5ddc141-eae8-4a4c-b118-a79a9276cf33","Type":"ContainerStarted","Data":"397716be4920956a502ac8c6bd2971932d259f6024900cbc105aaa401ad0cc27"} Jan 27 20:24:16 crc kubenswrapper[4793]: I0127 20:24:16.822651 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:16 crc kubenswrapper[4793]: E0127 20:24:16.822838 4793 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 27 20:24:16 crc kubenswrapper[4793]: E0127 20:24:16.822854 4793 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 27 20:24:16 crc kubenswrapper[4793]: E0127 20:24:16.822896 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift podName:6a954bdd-89aa-4d5c-8034-5c8ed27e8652 nodeName:}" failed. No retries permitted until 2026-01-27 20:24:20.822880271 +0000 UTC m=+1286.213133427 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift") pod "swift-storage-0" (UID: "6a954bdd-89aa-4d5c-8034-5c8ed27e8652") : configmap "swift-ring-files" not found Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.093488 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-wzh4x"] Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.095454 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-wzh4x" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.112741 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-wzh4x"] Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.302229 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f847ba7a-9d1b-4fbd-8ca3-7d64813b628a-operator-scripts\") pod \"keystone-db-create-wzh4x\" (UID: \"f847ba7a-9d1b-4fbd-8ca3-7d64813b628a\") " pod="openstack/keystone-db-create-wzh4x" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.302483 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-df55b\" (UniqueName: \"kubernetes.io/projected/f847ba7a-9d1b-4fbd-8ca3-7d64813b628a-kube-api-access-df55b\") pod \"keystone-db-create-wzh4x\" (UID: \"f847ba7a-9d1b-4fbd-8ca3-7d64813b628a\") " pod="openstack/keystone-db-create-wzh4x" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.357344 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.404804 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-df55b\" (UniqueName: \"kubernetes.io/projected/f847ba7a-9d1b-4fbd-8ca3-7d64813b628a-kube-api-access-df55b\") pod \"keystone-db-create-wzh4x\" (UID: \"f847ba7a-9d1b-4fbd-8ca3-7d64813b628a\") " pod="openstack/keystone-db-create-wzh4x" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.404907 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f847ba7a-9d1b-4fbd-8ca3-7d64813b628a-operator-scripts\") pod \"keystone-db-create-wzh4x\" (UID: \"f847ba7a-9d1b-4fbd-8ca3-7d64813b628a\") " pod="openstack/keystone-db-create-wzh4x" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.405661 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f847ba7a-9d1b-4fbd-8ca3-7d64813b628a-operator-scripts\") pod \"keystone-db-create-wzh4x\" (UID: \"f847ba7a-9d1b-4fbd-8ca3-7d64813b628a\") " pod="openstack/keystone-db-create-wzh4x" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.434394 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-a8bf-account-create-update-jpwvn"] Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.434887 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-df55b\" (UniqueName: \"kubernetes.io/projected/f847ba7a-9d1b-4fbd-8ca3-7d64813b628a-kube-api-access-df55b\") pod \"keystone-db-create-wzh4x\" (UID: \"f847ba7a-9d1b-4fbd-8ca3-7d64813b628a\") " pod="openstack/keystone-db-create-wzh4x" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.436053 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-a8bf-account-create-update-jpwvn" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.444756 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.469966 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-a8bf-account-create-update-jpwvn"] Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.506268 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9047e5d3-ba8c-49d3-af56-6f6b3a090759-operator-scripts\") pod \"keystone-a8bf-account-create-update-jpwvn\" (UID: \"9047e5d3-ba8c-49d3-af56-6f6b3a090759\") " pod="openstack/keystone-a8bf-account-create-update-jpwvn" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.506639 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdk7h\" (UniqueName: \"kubernetes.io/projected/9047e5d3-ba8c-49d3-af56-6f6b3a090759-kube-api-access-vdk7h\") pod \"keystone-a8bf-account-create-update-jpwvn\" (UID: \"9047e5d3-ba8c-49d3-af56-6f6b3a090759\") " pod="openstack/keystone-a8bf-account-create-update-jpwvn" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.608497 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9047e5d3-ba8c-49d3-af56-6f6b3a090759-operator-scripts\") pod \"keystone-a8bf-account-create-update-jpwvn\" (UID: \"9047e5d3-ba8c-49d3-af56-6f6b3a090759\") " pod="openstack/keystone-a8bf-account-create-update-jpwvn" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.608635 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdk7h\" (UniqueName: \"kubernetes.io/projected/9047e5d3-ba8c-49d3-af56-6f6b3a090759-kube-api-access-vdk7h\") pod \"keystone-a8bf-account-create-update-jpwvn\" (UID: \"9047e5d3-ba8c-49d3-af56-6f6b3a090759\") " pod="openstack/keystone-a8bf-account-create-update-jpwvn" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.609264 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9047e5d3-ba8c-49d3-af56-6f6b3a090759-operator-scripts\") pod \"keystone-a8bf-account-create-update-jpwvn\" (UID: \"9047e5d3-ba8c-49d3-af56-6f6b3a090759\") " pod="openstack/keystone-a8bf-account-create-update-jpwvn" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.632259 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-dtqh6"] Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.633701 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-dtqh6" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.648677 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-f09f-account-create-update-kchl7"] Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.649291 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdk7h\" (UniqueName: \"kubernetes.io/projected/9047e5d3-ba8c-49d3-af56-6f6b3a090759-kube-api-access-vdk7h\") pod \"keystone-a8bf-account-create-update-jpwvn\" (UID: \"9047e5d3-ba8c-49d3-af56-6f6b3a090759\") " pod="openstack/keystone-a8bf-account-create-update-jpwvn" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.651677 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f09f-account-create-update-kchl7" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.655286 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.672652 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-dtqh6"] Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.690905 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-f09f-account-create-update-kchl7"] Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.710476 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c2d66d7-1414-4bc8-9131-9af3080a5c4f-operator-scripts\") pod \"placement-db-create-dtqh6\" (UID: \"4c2d66d7-1414-4bc8-9131-9af3080a5c4f\") " pod="openstack/placement-db-create-dtqh6" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.710567 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29c672ac-584e-4777-8c08-4f78c6286686-operator-scripts\") pod \"placement-f09f-account-create-update-kchl7\" (UID: \"29c672ac-584e-4777-8c08-4f78c6286686\") " pod="openstack/placement-f09f-account-create-update-kchl7" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.710749 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrkfj\" (UniqueName: \"kubernetes.io/projected/4c2d66d7-1414-4bc8-9131-9af3080a5c4f-kube-api-access-mrkfj\") pod \"placement-db-create-dtqh6\" (UID: \"4c2d66d7-1414-4bc8-9131-9af3080a5c4f\") " pod="openstack/placement-db-create-dtqh6" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.710835 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkfxs\" (UniqueName: \"kubernetes.io/projected/29c672ac-584e-4777-8c08-4f78c6286686-kube-api-access-jkfxs\") pod \"placement-f09f-account-create-update-kchl7\" (UID: \"29c672ac-584e-4777-8c08-4f78c6286686\") " pod="openstack/placement-f09f-account-create-update-kchl7" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.716489 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-wzh4x" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.793987 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-a8bf-account-create-update-jpwvn" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.813266 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrkfj\" (UniqueName: \"kubernetes.io/projected/4c2d66d7-1414-4bc8-9131-9af3080a5c4f-kube-api-access-mrkfj\") pod \"placement-db-create-dtqh6\" (UID: \"4c2d66d7-1414-4bc8-9131-9af3080a5c4f\") " pod="openstack/placement-db-create-dtqh6" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.813627 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkfxs\" (UniqueName: \"kubernetes.io/projected/29c672ac-584e-4777-8c08-4f78c6286686-kube-api-access-jkfxs\") pod \"placement-f09f-account-create-update-kchl7\" (UID: \"29c672ac-584e-4777-8c08-4f78c6286686\") " pod="openstack/placement-f09f-account-create-update-kchl7" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.813711 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c2d66d7-1414-4bc8-9131-9af3080a5c4f-operator-scripts\") pod \"placement-db-create-dtqh6\" (UID: \"4c2d66d7-1414-4bc8-9131-9af3080a5c4f\") " pod="openstack/placement-db-create-dtqh6" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.813740 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29c672ac-584e-4777-8c08-4f78c6286686-operator-scripts\") pod \"placement-f09f-account-create-update-kchl7\" (UID: \"29c672ac-584e-4777-8c08-4f78c6286686\") " pod="openstack/placement-f09f-account-create-update-kchl7" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.815782 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c2d66d7-1414-4bc8-9131-9af3080a5c4f-operator-scripts\") pod \"placement-db-create-dtqh6\" (UID: \"4c2d66d7-1414-4bc8-9131-9af3080a5c4f\") " pod="openstack/placement-db-create-dtqh6" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.817192 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29c672ac-584e-4777-8c08-4f78c6286686-operator-scripts\") pod \"placement-f09f-account-create-update-kchl7\" (UID: \"29c672ac-584e-4777-8c08-4f78c6286686\") " pod="openstack/placement-f09f-account-create-update-kchl7" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.852503 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrkfj\" (UniqueName: \"kubernetes.io/projected/4c2d66d7-1414-4bc8-9131-9af3080a5c4f-kube-api-access-mrkfj\") pod \"placement-db-create-dtqh6\" (UID: \"4c2d66d7-1414-4bc8-9131-9af3080a5c4f\") " pod="openstack/placement-db-create-dtqh6" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.855393 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkfxs\" (UniqueName: \"kubernetes.io/projected/29c672ac-584e-4777-8c08-4f78c6286686-kube-api-access-jkfxs\") pod \"placement-f09f-account-create-update-kchl7\" (UID: \"29c672ac-584e-4777-8c08-4f78c6286686\") " pod="openstack/placement-f09f-account-create-update-kchl7" Jan 27 20:24:17 crc kubenswrapper[4793]: I0127 20:24:17.959000 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-dtqh6" Jan 27 20:24:18 crc kubenswrapper[4793]: I0127 20:24:18.020287 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f09f-account-create-update-kchl7" Jan 27 20:24:18 crc kubenswrapper[4793]: I0127 20:24:18.232122 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-wzh4x"] Jan 27 20:24:18 crc kubenswrapper[4793]: W0127 20:24:18.256745 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf847ba7a_9d1b_4fbd_8ca3_7d64813b628a.slice/crio-a3d753cbc2b32a8ceb9f72e56f85bc6e22f3cb5fb0530f319ded5f67e0f68e0c WatchSource:0}: Error finding container a3d753cbc2b32a8ceb9f72e56f85bc6e22f3cb5fb0530f319ded5f67e0f68e0c: Status 404 returned error can't find the container with id a3d753cbc2b32a8ceb9f72e56f85bc6e22f3cb5fb0530f319ded5f67e0f68e0c Jan 27 20:24:18 crc kubenswrapper[4793]: I0127 20:24:18.347527 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-a8bf-account-create-update-jpwvn"] Jan 27 20:24:18 crc kubenswrapper[4793]: I0127 20:24:18.367615 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-f09f-account-create-update-kchl7"] Jan 27 20:24:18 crc kubenswrapper[4793]: I0127 20:24:18.485969 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-dtqh6"] Jan 27 20:24:19 crc kubenswrapper[4793]: I0127 20:24:19.013828 4793 generic.go:334] "Generic (PLEG): container finished" podID="1959c2b9-5a70-4503-aef6-52dcfe28dd73" containerID="4503e815e62847751d90f208bcd36819ca7b9c51d920ea8e5330e510a2e784cb" exitCode=0 Jan 27 20:24:19 crc kubenswrapper[4793]: I0127 20:24:19.013908 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" event={"ID":"1959c2b9-5a70-4503-aef6-52dcfe28dd73","Type":"ContainerDied","Data":"4503e815e62847751d90f208bcd36819ca7b9c51d920ea8e5330e510a2e784cb"} Jan 27 20:24:19 crc kubenswrapper[4793]: I0127 20:24:19.042742 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-a8bf-account-create-update-jpwvn" event={"ID":"9047e5d3-ba8c-49d3-af56-6f6b3a090759","Type":"ContainerStarted","Data":"1ba26e21b66fbc867cee1e1c13aa2b549d0cb9bbb14b0a1fad66c04f99e56556"} Jan 27 20:24:19 crc kubenswrapper[4793]: I0127 20:24:19.042799 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-a8bf-account-create-update-jpwvn" event={"ID":"9047e5d3-ba8c-49d3-af56-6f6b3a090759","Type":"ContainerStarted","Data":"f054ad75f1bab1c3002f3d041e7c386346431d93385b9cd3c2544944ab20f39f"} Jan 27 20:24:19 crc kubenswrapper[4793]: I0127 20:24:19.070762 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kbw4s" event={"ID":"a267a0de-2ca0-4324-a99d-faf15e41e8ff","Type":"ContainerStarted","Data":"6c02c5b0406fcdb10b2247b96accf11bff0027707f55f2f43b52506ccfe6e2d1"} Jan 27 20:24:19 crc kubenswrapper[4793]: I0127 20:24:19.074526 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-a8bf-account-create-update-jpwvn" podStartSLOduration=2.074504862 podStartE2EDuration="2.074504862s" podCreationTimestamp="2026-01-27 20:24:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:24:19.072612075 +0000 UTC m=+1284.462865231" 
Jan 27 20:24:19 crc kubenswrapper[4793]: I0127 20:24:19.074526 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-a8bf-account-create-update-jpwvn" podStartSLOduration=2.074504862 podStartE2EDuration="2.074504862s" podCreationTimestamp="2026-01-27 20:24:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:24:19.072612075 +0000 UTC m=+1284.462865231" watchObservedRunningTime="2026-01-27 20:24:19.074504862 +0000 UTC m=+1284.464758018"
Jan 27 20:24:19 crc kubenswrapper[4793]: I0127 20:24:19.083856 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-3d85-account-create-update-qqk7s" event={"ID":"b0ab0f0f-0759-461c-b18e-ca19b1a627e0","Type":"ContainerStarted","Data":"0a3b4d61631f60c9a06084897dc2e6a6dd357c966ba1b3d2a070ac08424b0bfc"}
Jan 27 20:24:19 crc kubenswrapper[4793]: I0127 20:24:19.098109 4793 generic.go:334] "Generic (PLEG): container finished" podID="9d33de24-4f5c-4cff-8da3-7848753edd2a" containerID="63064aa02c2270eb030fc2d7958b8f3befa944fe2ea1cd70f86590ad461d57e2" exitCode=0
Jan 27 20:24:19 crc kubenswrapper[4793]: I0127 20:24:19.098150 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-kbw4s" podStartSLOduration=6.098135372 podStartE2EDuration="6.098135372s" podCreationTimestamp="2026-01-27 20:24:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:24:19.092464213 +0000 UTC m=+1284.482717379" watchObservedRunningTime="2026-01-27 20:24:19.098135372 +0000 UTC m=+1284.488388528"
Jan 27 20:24:19 crc kubenswrapper[4793]: I0127 20:24:19.098199 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-kth5t" event={"ID":"9d33de24-4f5c-4cff-8da3-7848753edd2a","Type":"ContainerDied","Data":"63064aa02c2270eb030fc2d7958b8f3befa944fe2ea1cd70f86590ad461d57e2"}
Jan 27 20:24:19 crc kubenswrapper[4793]: I0127 20:24:19.100728 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f09f-account-create-update-kchl7" event={"ID":"29c672ac-584e-4777-8c08-4f78c6286686","Type":"ContainerStarted","Data":"1e126272c77339b48ba0d47c59b547ca8b6f9df57ef5d19b575f2afdcd4f0ea7"}
Jan 27 20:24:19 crc kubenswrapper[4793]: I0127 20:24:19.102295 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-wzh4x" event={"ID":"f847ba7a-9d1b-4fbd-8ca3-7d64813b628a","Type":"ContainerStarted","Data":"29c4c37cfbf9cb56c22b7aa90d7b916d9fdc12a02810ff12030a18987dd0a2b7"}
Jan 27 20:24:19 crc kubenswrapper[4793]: I0127 20:24:19.102325 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-wzh4x" event={"ID":"f847ba7a-9d1b-4fbd-8ca3-7d64813b628a","Type":"ContainerStarted","Data":"a3d753cbc2b32a8ceb9f72e56f85bc6e22f3cb5fb0530f319ded5f67e0f68e0c"}
Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.115897 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-dtqh6" event={"ID":"4c2d66d7-1414-4bc8-9131-9af3080a5c4f","Type":"ContainerStarted","Data":"5ad6a7efcba56f4aa977f41c194f05638235e97d479801b2ec91f8889f0635ed"}
Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.115947 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-dtqh6" event={"ID":"4c2d66d7-1414-4bc8-9131-9af3080a5c4f","Type":"ContainerStarted","Data":"a644da81845eb52c2c203059725d6b3fa21ecc613b42fb37f75a1e0e8d939e3f"}
Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.122210 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a290e10d-fe47-4a6c-bb5c-e5306fefd090","Type":"ContainerStarted","Data":"9780dab57b81cbb94d3c418e6f1865a3cfde246b0c2e32c0c1cf812c5f677dd8"}
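The pod_startup_latency_tracker entries here report two numbers: podStartE2EDuration (observed running time minus podCreationTimestamp) and podStartSLOduration, which additionally subtracts the image-pull window; when firstStartedPulling/lastFinishedPulling are the zero time, as in the entries above, no pull happened and the two match. A small sketch of that arithmetic using the timestamps from the prometheus-metric-storage-0 entry just below (20:24:20.170072); the final digits differ slightly from the log because the kubelet subtracts monotonic-clock readings rather than the printed wall-clock strings:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamps copied from the prometheus-metric-storage-0 entry; this
	// layout matches Go's default time.Time.String() output used in the log.
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}

	created := parse("2026-01-27 20:23:13 +0000 UTC")
	running := parse("2026-01-27 20:24:20.170044249 +0000 UTC")
	pullStart := parse("2026-01-27 20:23:34.73072542 +0000 UTC")
	pullEnd := parse("2026-01-27 20:24:19.784160978 +0000 UTC")

	e2e := running.Sub(created)         // podStartE2EDuration: 1m7.170044249s
	slo := e2e - pullEnd.Sub(pullStart) // podStartSLOduration: ~22.116608s
	fmt.Println(e2e, slo)
}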
finished" podID="a267a0de-2ca0-4324-a99d-faf15e41e8ff" containerID="6c02c5b0406fcdb10b2247b96accf11bff0027707f55f2f43b52506ccfe6e2d1" exitCode=0 Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.124866 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kbw4s" event={"ID":"a267a0de-2ca0-4324-a99d-faf15e41e8ff","Type":"ContainerDied","Data":"6c02c5b0406fcdb10b2247b96accf11bff0027707f55f2f43b52506ccfe6e2d1"} Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.128022 4793 generic.go:334] "Generic (PLEG): container finished" podID="b0ab0f0f-0759-461c-b18e-ca19b1a627e0" containerID="0a3b4d61631f60c9a06084897dc2e6a6dd357c966ba1b3d2a070ac08424b0bfc" exitCode=0 Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.128092 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-3d85-account-create-update-qqk7s" event={"ID":"b0ab0f0f-0759-461c-b18e-ca19b1a627e0","Type":"ContainerDied","Data":"0a3b4d61631f60c9a06084897dc2e6a6dd357c966ba1b3d2a070ac08424b0bfc"} Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.133078 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-dtqh6" podStartSLOduration=3.133062131 podStartE2EDuration="3.133062131s" podCreationTimestamp="2026-01-27 20:24:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:24:20.129637476 +0000 UTC m=+1285.519890642" watchObservedRunningTime="2026-01-27 20:24:20.133062131 +0000 UTC m=+1285.523315287" Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.133280 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f09f-account-create-update-kchl7" event={"ID":"29c672ac-584e-4777-8c08-4f78c6286686","Type":"ContainerStarted","Data":"0649ee374bd61944b4796eba42b0518fb013cd0f75d9c150853e874c7711218b"} Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.136399 4793 generic.go:334] "Generic (PLEG): container finished" podID="f847ba7a-9d1b-4fbd-8ca3-7d64813b628a" containerID="29c4c37cfbf9cb56c22b7aa90d7b916d9fdc12a02810ff12030a18987dd0a2b7" exitCode=0 Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.136460 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-wzh4x" event={"ID":"f847ba7a-9d1b-4fbd-8ca3-7d64813b628a","Type":"ContainerDied","Data":"29c4c37cfbf9cb56c22b7aa90d7b916d9fdc12a02810ff12030a18987dd0a2b7"} Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.141010 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" event={"ID":"1959c2b9-5a70-4503-aef6-52dcfe28dd73","Type":"ContainerStarted","Data":"73d1506cf1b6e71fec42fbf881ce3d0a406261e3ddddc2a09541ede841d45a25"} Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.141989 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.143758 4793 generic.go:334] "Generic (PLEG): container finished" podID="9047e5d3-ba8c-49d3-af56-6f6b3a090759" containerID="1ba26e21b66fbc867cee1e1c13aa2b549d0cb9bbb14b0a1fad66c04f99e56556" exitCode=0 Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.143859 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-a8bf-account-create-update-jpwvn" 
event={"ID":"9047e5d3-ba8c-49d3-af56-6f6b3a090759","Type":"ContainerDied","Data":"1ba26e21b66fbc867cee1e1c13aa2b549d0cb9bbb14b0a1fad66c04f99e56556"} Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.170072 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=22.116608681 podStartE2EDuration="1m7.170044249s" podCreationTimestamp="2026-01-27 20:23:13 +0000 UTC" firstStartedPulling="2026-01-27 20:23:34.73072542 +0000 UTC m=+1240.120978566" lastFinishedPulling="2026-01-27 20:24:19.784160978 +0000 UTC m=+1285.174414134" observedRunningTime="2026-01-27 20:24:20.165271262 +0000 UTC m=+1285.555524438" watchObservedRunningTime="2026-01-27 20:24:20.170044249 +0000 UTC m=+1285.560297405" Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.219265 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" podStartSLOduration=9.219239647 podStartE2EDuration="9.219239647s" podCreationTimestamp="2026-01-27 20:24:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:24:20.21565584 +0000 UTC m=+1285.605909006" watchObservedRunningTime="2026-01-27 20:24:20.219239647 +0000 UTC m=+1285.609492803" Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.266476 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-f09f-account-create-update-kchl7" podStartSLOduration=3.266456968 podStartE2EDuration="3.266456968s" podCreationTimestamp="2026-01-27 20:24:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:24:20.26369081 +0000 UTC m=+1285.653943966" watchObservedRunningTime="2026-01-27 20:24:20.266456968 +0000 UTC m=+1285.656710124" Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.279722 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:20 crc kubenswrapper[4793]: I0127 20:24:20.830529 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:20 crc kubenswrapper[4793]: E0127 20:24:20.830712 4793 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 27 20:24:20 crc kubenswrapper[4793]: E0127 20:24:20.831083 4793 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 27 20:24:20 crc kubenswrapper[4793]: E0127 20:24:20.831138 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift podName:6a954bdd-89aa-4d5c-8034-5c8ed27e8652 nodeName:}" failed. No retries permitted until 2026-01-27 20:24:28.831120481 +0000 UTC m=+1294.221373637 (durationBeforeRetry 8s). 
Jan 27 20:24:20 crc kubenswrapper[4793]: E0127 20:24:20.831138 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift podName:6a954bdd-89aa-4d5c-8034-5c8ed27e8652 nodeName:}" failed. No retries permitted until 2026-01-27 20:24:28.831120481 +0000 UTC m=+1294.221373637 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift") pod "swift-storage-0" (UID: "6a954bdd-89aa-4d5c-8034-5c8ed27e8652") : configmap "swift-ring-files" not found
Jan 27 20:24:21 crc kubenswrapper[4793]: I0127 20:24:21.156018 4793 generic.go:334] "Generic (PLEG): container finished" podID="4c2d66d7-1414-4bc8-9131-9af3080a5c4f" containerID="5ad6a7efcba56f4aa977f41c194f05638235e97d479801b2ec91f8889f0635ed" exitCode=0
Jan 27 20:24:21 crc kubenswrapper[4793]: I0127 20:24:21.156097 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-dtqh6" event={"ID":"4c2d66d7-1414-4bc8-9131-9af3080a5c4f","Type":"ContainerDied","Data":"5ad6a7efcba56f4aa977f41c194f05638235e97d479801b2ec91f8889f0635ed"}
Jan 27 20:24:21 crc kubenswrapper[4793]: I0127 20:24:21.158637 4793 generic.go:334] "Generic (PLEG): container finished" podID="29c672ac-584e-4777-8c08-4f78c6286686" containerID="0649ee374bd61944b4796eba42b0518fb013cd0f75d9c150853e874c7711218b" exitCode=0
Jan 27 20:24:21 crc kubenswrapper[4793]: I0127 20:24:21.158729 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f09f-account-create-update-kchl7" event={"ID":"29c672ac-584e-4777-8c08-4f78c6286686","Type":"ContainerDied","Data":"0649ee374bd61944b4796eba42b0518fb013cd0f75d9c150853e874c7711218b"}
Jan 27 20:24:21 crc kubenswrapper[4793]: I0127 20:24:21.607829 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.128297 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-dtqh6"
Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.138041 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-3d85-account-create-update-qqk7s"
Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.194299 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kbw4s" event={"ID":"a267a0de-2ca0-4324-a99d-faf15e41e8ff","Type":"ContainerDied","Data":"788fc80da87871a826b3bb07c0a2e97fe5c4e2584885c05b289f694881ff860c"}
Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.194344 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="788fc80da87871a826b3bb07c0a2e97fe5c4e2584885c05b289f694881ff860c"
Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.197100 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-3d85-account-create-update-qqk7s" event={"ID":"b0ab0f0f-0759-461c-b18e-ca19b1a627e0","Type":"ContainerDied","Data":"ce15a4b8423d43be418cbb232b8d74d41bb96097bab483b4e9df738982e4ab5d"}
Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.197137 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce15a4b8423d43be418cbb232b8d74d41bb96097bab483b4e9df738982e4ab5d"
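Across the three failed etc-swift mounts above, durationBeforeRetry doubles: 2s at 20:24:14, 4s at 20:24:16, 8s at 20:24:20, with the next retry deferred to 20:24:28. A generic sketch of that doubling pattern follows; it is not the kubelet's actual implementation, and the initial delay and cap here are illustrative:

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryWithBackoff retries op, doubling the wait after each failure up to max.
func retryWithBackoff(op func() error, initial, max time.Duration) {
	delay := initial
	for {
		if err := op(); err == nil {
			return
		}
		fmt.Printf("failed; no retries permitted for %v\n", delay)
		time.Sleep(delay)
		delay *= 2
		if delay > max {
			delay = max
		}
	}
}

func main() {
	attempts := 0
	retryWithBackoff(func() error {
		attempts++
		if attempts < 4 {
			// Stand-in for: configmap "swift-ring-files" not found.
			return errors.New("configmap not found")
		}
		return nil
	}, 2*time.Second, 32*time.Second)
}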
Need to start a new one" pod="openstack/watcher-3d85-account-create-update-qqk7s" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.200075 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-kth5t" event={"ID":"9d33de24-4f5c-4cff-8da3-7848753edd2a","Type":"ContainerDied","Data":"aeccdaccdb730b33679ec0e40e779a95e205ac66d2ad54403cf01fd399a72632"} Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.200104 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aeccdaccdb730b33679ec0e40e779a95e205ac66d2ad54403cf01fd399a72632" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.202385 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f09f-account-create-update-kchl7" event={"ID":"29c672ac-584e-4777-8c08-4f78c6286686","Type":"ContainerDied","Data":"1e126272c77339b48ba0d47c59b547ca8b6f9df57ef5d19b575f2afdcd4f0ea7"} Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.202430 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1e126272c77339b48ba0d47c59b547ca8b6f9df57ef5d19b575f2afdcd4f0ea7" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.203761 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-wzh4x" event={"ID":"f847ba7a-9d1b-4fbd-8ca3-7d64813b628a","Type":"ContainerDied","Data":"a3d753cbc2b32a8ceb9f72e56f85bc6e22f3cb5fb0530f319ded5f67e0f68e0c"} Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.203786 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a3d753cbc2b32a8ceb9f72e56f85bc6e22f3cb5fb0530f319ded5f67e0f68e0c" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.205215 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-a8bf-account-create-update-jpwvn" event={"ID":"9047e5d3-ba8c-49d3-af56-6f6b3a090759","Type":"ContainerDied","Data":"f054ad75f1bab1c3002f3d041e7c386346431d93385b9cd3c2544944ab20f39f"} Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.205246 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f054ad75f1bab1c3002f3d041e7c386346431d93385b9cd3c2544944ab20f39f" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.208049 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-dtqh6" event={"ID":"4c2d66d7-1414-4bc8-9131-9af3080a5c4f","Type":"ContainerDied","Data":"a644da81845eb52c2c203059725d6b3fa21ecc613b42fb37f75a1e0e8d939e3f"} Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.208071 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a644da81845eb52c2c203059725d6b3fa21ecc613b42fb37f75a1e0e8d939e3f" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.208113 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-dtqh6" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.228333 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-kth5t" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.235940 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-wzh4x" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.296251 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c2d66d7-1414-4bc8-9131-9af3080a5c4f-operator-scripts\") pod \"4c2d66d7-1414-4bc8-9131-9af3080a5c4f\" (UID: \"4c2d66d7-1414-4bc8-9131-9af3080a5c4f\") " Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.296339 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mrkfj\" (UniqueName: \"kubernetes.io/projected/4c2d66d7-1414-4bc8-9131-9af3080a5c4f-kube-api-access-mrkfj\") pod \"4c2d66d7-1414-4bc8-9131-9af3080a5c4f\" (UID: \"4c2d66d7-1414-4bc8-9131-9af3080a5c4f\") " Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.296404 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0ab0f0f-0759-461c-b18e-ca19b1a627e0-operator-scripts\") pod \"b0ab0f0f-0759-461c-b18e-ca19b1a627e0\" (UID: \"b0ab0f0f-0759-461c-b18e-ca19b1a627e0\") " Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.296452 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cd9ws\" (UniqueName: \"kubernetes.io/projected/b0ab0f0f-0759-461c-b18e-ca19b1a627e0-kube-api-access-cd9ws\") pod \"b0ab0f0f-0759-461c-b18e-ca19b1a627e0\" (UID: \"b0ab0f0f-0759-461c-b18e-ca19b1a627e0\") " Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.297172 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c2d66d7-1414-4bc8-9131-9af3080a5c4f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4c2d66d7-1414-4bc8-9131-9af3080a5c4f" (UID: "4c2d66d7-1414-4bc8-9131-9af3080a5c4f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.297276 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0ab0f0f-0759-461c-b18e-ca19b1a627e0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b0ab0f0f-0759-461c-b18e-ca19b1a627e0" (UID: "b0ab0f0f-0759-461c-b18e-ca19b1a627e0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.297799 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c2d66d7-1414-4bc8-9131-9af3080a5c4f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.297818 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0ab0f0f-0759-461c-b18e-ca19b1a627e0-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.301353 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c2d66d7-1414-4bc8-9131-9af3080a5c4f-kube-api-access-mrkfj" (OuterVolumeSpecName: "kube-api-access-mrkfj") pod "4c2d66d7-1414-4bc8-9131-9af3080a5c4f" (UID: "4c2d66d7-1414-4bc8-9131-9af3080a5c4f"). InnerVolumeSpecName "kube-api-access-mrkfj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.301746 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0ab0f0f-0759-461c-b18e-ca19b1a627e0-kube-api-access-cd9ws" (OuterVolumeSpecName: "kube-api-access-cd9ws") pod "b0ab0f0f-0759-461c-b18e-ca19b1a627e0" (UID: "b0ab0f0f-0759-461c-b18e-ca19b1a627e0"). InnerVolumeSpecName "kube-api-access-cd9ws". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.306150 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-a8bf-account-create-update-jpwvn" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.317762 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kbw4s" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.357944 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f09f-account-create-update-kchl7" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.399080 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f847ba7a-9d1b-4fbd-8ca3-7d64813b628a-operator-scripts\") pod \"f847ba7a-9d1b-4fbd-8ca3-7d64813b628a\" (UID: \"f847ba7a-9d1b-4fbd-8ca3-7d64813b628a\") " Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.399154 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d33de24-4f5c-4cff-8da3-7848753edd2a-operator-scripts\") pod \"9d33de24-4f5c-4cff-8da3-7848753edd2a\" (UID: \"9d33de24-4f5c-4cff-8da3-7848753edd2a\") " Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.399185 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-df55b\" (UniqueName: \"kubernetes.io/projected/f847ba7a-9d1b-4fbd-8ca3-7d64813b628a-kube-api-access-df55b\") pod \"f847ba7a-9d1b-4fbd-8ca3-7d64813b628a\" (UID: \"f847ba7a-9d1b-4fbd-8ca3-7d64813b628a\") " Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.399269 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fb52r\" (UniqueName: \"kubernetes.io/projected/9d33de24-4f5c-4cff-8da3-7848753edd2a-kube-api-access-fb52r\") pod \"9d33de24-4f5c-4cff-8da3-7848753edd2a\" (UID: \"9d33de24-4f5c-4cff-8da3-7848753edd2a\") " Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.399692 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f847ba7a-9d1b-4fbd-8ca3-7d64813b628a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f847ba7a-9d1b-4fbd-8ca3-7d64813b628a" (UID: "f847ba7a-9d1b-4fbd-8ca3-7d64813b628a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.399719 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d33de24-4f5c-4cff-8da3-7848753edd2a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9d33de24-4f5c-4cff-8da3-7848753edd2a" (UID: "9d33de24-4f5c-4cff-8da3-7848753edd2a"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.399999 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f847ba7a-9d1b-4fbd-8ca3-7d64813b628a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.400020 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d33de24-4f5c-4cff-8da3-7848753edd2a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.400033 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mrkfj\" (UniqueName: \"kubernetes.io/projected/4c2d66d7-1414-4bc8-9131-9af3080a5c4f-kube-api-access-mrkfj\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.400049 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cd9ws\" (UniqueName: \"kubernetes.io/projected/b0ab0f0f-0759-461c-b18e-ca19b1a627e0-kube-api-access-cd9ws\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.445580 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f847ba7a-9d1b-4fbd-8ca3-7d64813b628a-kube-api-access-df55b" (OuterVolumeSpecName: "kube-api-access-df55b") pod "f847ba7a-9d1b-4fbd-8ca3-7d64813b628a" (UID: "f847ba7a-9d1b-4fbd-8ca3-7d64813b628a"). InnerVolumeSpecName "kube-api-access-df55b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.445912 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d33de24-4f5c-4cff-8da3-7848753edd2a-kube-api-access-fb52r" (OuterVolumeSpecName: "kube-api-access-fb52r") pod "9d33de24-4f5c-4cff-8da3-7848753edd2a" (UID: "9d33de24-4f5c-4cff-8da3-7848753edd2a"). InnerVolumeSpecName "kube-api-access-fb52r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.447100 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-fxmkt" podUID="590c61a6-8355-4f4f-be2b-4680745b4732" containerName="ovn-controller" probeResult="failure" output=< Jan 27 20:24:23 crc kubenswrapper[4793]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 27 20:24:23 crc kubenswrapper[4793]: > Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.501212 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wdrwt\" (UniqueName: \"kubernetes.io/projected/a267a0de-2ca0-4324-a99d-faf15e41e8ff-kube-api-access-wdrwt\") pod \"a267a0de-2ca0-4324-a99d-faf15e41e8ff\" (UID: \"a267a0de-2ca0-4324-a99d-faf15e41e8ff\") " Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.501301 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29c672ac-584e-4777-8c08-4f78c6286686-operator-scripts\") pod \"29c672ac-584e-4777-8c08-4f78c6286686\" (UID: \"29c672ac-584e-4777-8c08-4f78c6286686\") " Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.501943 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a267a0de-2ca0-4324-a99d-faf15e41e8ff-operator-scripts\") pod \"a267a0de-2ca0-4324-a99d-faf15e41e8ff\" (UID: \"a267a0de-2ca0-4324-a99d-faf15e41e8ff\") " Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.502034 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdk7h\" (UniqueName: \"kubernetes.io/projected/9047e5d3-ba8c-49d3-af56-6f6b3a090759-kube-api-access-vdk7h\") pod \"9047e5d3-ba8c-49d3-af56-6f6b3a090759\" (UID: \"9047e5d3-ba8c-49d3-af56-6f6b3a090759\") " Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.501937 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29c672ac-584e-4777-8c08-4f78c6286686-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "29c672ac-584e-4777-8c08-4f78c6286686" (UID: "29c672ac-584e-4777-8c08-4f78c6286686"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.502080 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9047e5d3-ba8c-49d3-af56-6f6b3a090759-operator-scripts\") pod \"9047e5d3-ba8c-49d3-af56-6f6b3a090759\" (UID: \"9047e5d3-ba8c-49d3-af56-6f6b3a090759\") " Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.502144 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkfxs\" (UniqueName: \"kubernetes.io/projected/29c672ac-584e-4777-8c08-4f78c6286686-kube-api-access-jkfxs\") pod \"29c672ac-584e-4777-8c08-4f78c6286686\" (UID: \"29c672ac-584e-4777-8c08-4f78c6286686\") " Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.502615 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29c672ac-584e-4777-8c08-4f78c6286686-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.502633 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-df55b\" (UniqueName: \"kubernetes.io/projected/f847ba7a-9d1b-4fbd-8ca3-7d64813b628a-kube-api-access-df55b\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.502666 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fb52r\" (UniqueName: \"kubernetes.io/projected/9d33de24-4f5c-4cff-8da3-7848753edd2a-kube-api-access-fb52r\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.502668 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a267a0de-2ca0-4324-a99d-faf15e41e8ff-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a267a0de-2ca0-4324-a99d-faf15e41e8ff" (UID: "a267a0de-2ca0-4324-a99d-faf15e41e8ff"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.502732 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9047e5d3-ba8c-49d3-af56-6f6b3a090759-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9047e5d3-ba8c-49d3-af56-6f6b3a090759" (UID: "9047e5d3-ba8c-49d3-af56-6f6b3a090759"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.504340 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a267a0de-2ca0-4324-a99d-faf15e41e8ff-kube-api-access-wdrwt" (OuterVolumeSpecName: "kube-api-access-wdrwt") pod "a267a0de-2ca0-4324-a99d-faf15e41e8ff" (UID: "a267a0de-2ca0-4324-a99d-faf15e41e8ff"). InnerVolumeSpecName "kube-api-access-wdrwt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.505459 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9047e5d3-ba8c-49d3-af56-6f6b3a090759-kube-api-access-vdk7h" (OuterVolumeSpecName: "kube-api-access-vdk7h") pod "9047e5d3-ba8c-49d3-af56-6f6b3a090759" (UID: "9047e5d3-ba8c-49d3-af56-6f6b3a090759"). InnerVolumeSpecName "kube-api-access-vdk7h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.506158 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29c672ac-584e-4777-8c08-4f78c6286686-kube-api-access-jkfxs" (OuterVolumeSpecName: "kube-api-access-jkfxs") pod "29c672ac-584e-4777-8c08-4f78c6286686" (UID: "29c672ac-584e-4777-8c08-4f78c6286686"). InnerVolumeSpecName "kube-api-access-jkfxs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.604331 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a267a0de-2ca0-4324-a99d-faf15e41e8ff-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.604384 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdk7h\" (UniqueName: \"kubernetes.io/projected/9047e5d3-ba8c-49d3-af56-6f6b3a090759-kube-api-access-vdk7h\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.604406 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9047e5d3-ba8c-49d3-af56-6f6b3a090759-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.604422 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkfxs\" (UniqueName: \"kubernetes.io/projected/29c672ac-584e-4777-8c08-4f78c6286686-kube-api-access-jkfxs\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.604440 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wdrwt\" (UniqueName: \"kubernetes.io/projected/a267a0de-2ca0-4324-a99d-faf15e41e8ff-kube-api-access-wdrwt\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:23 crc kubenswrapper[4793]: I0127 20:24:23.670100 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:24:24 crc kubenswrapper[4793]: I0127 20:24:24.216265 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f09f-account-create-update-kchl7" Jan 27 20:24:24 crc kubenswrapper[4793]: I0127 20:24:24.216292 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-a8bf-account-create-update-jpwvn" Jan 27 20:24:24 crc kubenswrapper[4793]: I0127 20:24:24.216325 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-wzh4x" Jan 27 20:24:24 crc kubenswrapper[4793]: I0127 20:24:24.216252 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-2m2n7" event={"ID":"f5ddc141-eae8-4a4c-b118-a79a9276cf33","Type":"ContainerStarted","Data":"4a916b4844ef44b3ff72e48eb0693c696ad53c40953c04cc6734caf0998de5bf"} Jan 27 20:24:24 crc kubenswrapper[4793]: I0127 20:24:24.216442 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kbw4s" Jan 27 20:24:24 crc kubenswrapper[4793]: I0127 20:24:24.216925 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-create-kth5t" Jan 27 20:24:24 crc kubenswrapper[4793]: I0127 20:24:24.241582 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-2m2n7" podStartSLOduration=3.357011075 podStartE2EDuration="11.241523853s" podCreationTimestamp="2026-01-27 20:24:13 +0000 UTC" firstStartedPulling="2026-01-27 20:24:15.242740658 +0000 UTC m=+1280.632993814" lastFinishedPulling="2026-01-27 20:24:23.127253436 +0000 UTC m=+1288.517506592" observedRunningTime="2026-01-27 20:24:24.235735451 +0000 UTC m=+1289.625988607" watchObservedRunningTime="2026-01-27 20:24:24.241523853 +0000 UTC m=+1289.631777009" Jan 27 20:24:25 crc kubenswrapper[4793]: I0127 20:24:25.224588 4793 generic.go:334] "Generic (PLEG): container finished" podID="d13b401d-8f36-4677-b782-ebf9a3d5daab" containerID="8c2bb6675873503ce7043b2eb6c32deb18600689c3d2411aad6191687007c533" exitCode=0 Jan 27 20:24:25 crc kubenswrapper[4793]: I0127 20:24:25.224686 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"d13b401d-8f36-4677-b782-ebf9a3d5daab","Type":"ContainerDied","Data":"8c2bb6675873503ce7043b2eb6c32deb18600689c3d2411aad6191687007c533"} Jan 27 20:24:26 crc kubenswrapper[4793]: I0127 20:24:26.408099 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"d13b401d-8f36-4677-b782-ebf9a3d5daab","Type":"ContainerStarted","Data":"54fa8bdd53d9fcf20219b4087843c13cde8da4eafdbfee529ac22e4129b7ece8"} Jan 27 20:24:26 crc kubenswrapper[4793]: I0127 20:24:26.408591 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-notifications-server-0" Jan 27 20:24:26 crc kubenswrapper[4793]: I0127 20:24:26.439402 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-notifications-server-0" podStartSLOduration=42.689359186 podStartE2EDuration="1m19.439381953s" podCreationTimestamp="2026-01-27 20:23:07 +0000 UTC" firstStartedPulling="2026-01-27 20:23:11.448088999 +0000 UTC m=+1216.838342165" lastFinishedPulling="2026-01-27 20:23:48.198111776 +0000 UTC m=+1253.588364932" observedRunningTime="2026-01-27 20:24:26.428856034 +0000 UTC m=+1291.819109190" watchObservedRunningTime="2026-01-27 20:24:26.439381953 +0000 UTC m=+1291.829635119" Jan 27 20:24:27 crc kubenswrapper[4793]: I0127 20:24:27.118067 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" Jan 27 20:24:27 crc kubenswrapper[4793]: I0127 20:24:27.209855 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cb456c8f9-mxg2b"] Jan 27 20:24:27 crc kubenswrapper[4793]: I0127 20:24:27.213982 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b" podUID="2f32f180-5e5c-4b65-a124-684e2620b221" containerName="dnsmasq-dns" containerID="cri-o://6f2f32a0329e95e7cb4c1b463f131c5c692536c3ada7b33c5ddf928afdf46911" gracePeriod=10 Jan 27 20:24:27 crc kubenswrapper[4793]: I0127 20:24:27.597694 4793 generic.go:334] "Generic (PLEG): container finished" podID="2f32f180-5e5c-4b65-a124-684e2620b221" containerID="6f2f32a0329e95e7cb4c1b463f131c5c692536c3ada7b33c5ddf928afdf46911" exitCode=0 Jan 27 20:24:27 crc kubenswrapper[4793]: I0127 20:24:27.598819 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b" 
event={"ID":"2f32f180-5e5c-4b65-a124-684e2620b221","Type":"ContainerDied","Data":"6f2f32a0329e95e7cb4c1b463f131c5c692536c3ada7b33c5ddf928afdf46911"} Jan 27 20:24:27 crc kubenswrapper[4793]: I0127 20:24:27.892261 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b" Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.009648 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-config\") pod \"2f32f180-5e5c-4b65-a124-684e2620b221\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.009806 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-ovsdbserver-nb\") pod \"2f32f180-5e5c-4b65-a124-684e2620b221\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.009867 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-dns-svc\") pod \"2f32f180-5e5c-4b65-a124-684e2620b221\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.009925 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-ovsdbserver-sb\") pod \"2f32f180-5e5c-4b65-a124-684e2620b221\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.009986 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8dwv\" (UniqueName: \"kubernetes.io/projected/2f32f180-5e5c-4b65-a124-684e2620b221-kube-api-access-x8dwv\") pod \"2f32f180-5e5c-4b65-a124-684e2620b221\" (UID: \"2f32f180-5e5c-4b65-a124-684e2620b221\") " Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.098128 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f32f180-5e5c-4b65-a124-684e2620b221-kube-api-access-x8dwv" (OuterVolumeSpecName: "kube-api-access-x8dwv") pod "2f32f180-5e5c-4b65-a124-684e2620b221" (UID: "2f32f180-5e5c-4b65-a124-684e2620b221"). InnerVolumeSpecName "kube-api-access-x8dwv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.116998 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8dwv\" (UniqueName: \"kubernetes.io/projected/2f32f180-5e5c-4b65-a124-684e2620b221-kube-api-access-x8dwv\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.149780 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-config" (OuterVolumeSpecName: "config") pod "2f32f180-5e5c-4b65-a124-684e2620b221" (UID: "2f32f180-5e5c-4b65-a124-684e2620b221"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.186204 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2f32f180-5e5c-4b65-a124-684e2620b221" (UID: "2f32f180-5e5c-4b65-a124-684e2620b221"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.193699 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2f32f180-5e5c-4b65-a124-684e2620b221" (UID: "2f32f180-5e5c-4b65-a124-684e2620b221"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.195490 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2f32f180-5e5c-4b65-a124-684e2620b221" (UID: "2f32f180-5e5c-4b65-a124-684e2620b221"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.218814 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.218883 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.218911 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.218924 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f32f180-5e5c-4b65-a124-684e2620b221-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.358764 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-fxmkt" podUID="590c61a6-8355-4f4f-be2b-4680745b4732" containerName="ovn-controller" probeResult="failure" output=< Jan 27 20:24:28 crc kubenswrapper[4793]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 27 20:24:28 crc kubenswrapper[4793]: > Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.791822 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b" event={"ID":"2f32f180-5e5c-4b65-a124-684e2620b221","Type":"ContainerDied","Data":"969a996d06760d3e79745e3324a5b356335d50dacd9ef3ba6d0aa8026a155aa2"} Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.792111 4793 scope.go:117] "RemoveContainer" containerID="6f2f32a0329e95e7cb4c1b463f131c5c692536c3ada7b33c5ddf928afdf46911" Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.791964 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cb456c8f9-mxg2b" Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.833748 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-rrm85" Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.833811 4793 generic.go:334] "Generic (PLEG): container finished" podID="986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" containerID="1769161e8634efcc339439e61f08418e2f508bc1000ed8a23a5128fa680a685d" exitCode=0 Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.833876 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7","Type":"ContainerDied","Data":"1769161e8634efcc339439e61f08418e2f508bc1000ed8a23a5128fa680a685d"} Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.857196 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0" Jan 27 20:24:28 crc kubenswrapper[4793]: E0127 20:24:28.857901 4793 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 27 20:24:28 crc kubenswrapper[4793]: E0127 20:24:28.857999 4793 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 27 20:24:28 crc kubenswrapper[4793]: E0127 20:24:28.858108 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift podName:6a954bdd-89aa-4d5c-8034-5c8ed27e8652 nodeName:}" failed. No retries permitted until 2026-01-27 20:24:44.858091309 +0000 UTC m=+1310.248344465 (durationBeforeRetry 16s). 
Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.864761 4793 scope.go:117] "RemoveContainer" containerID="6d45b8d849ef0856985497e40bc7eeda14f733fd40ed3941dfe84a98a33cea46"
Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.864884 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cb456c8f9-mxg2b"]
Jan 27 20:24:28 crc kubenswrapper[4793]: I0127 20:24:28.874170 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cb456c8f9-mxg2b"]
Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.262863 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-fxmkt-config-8mp5w"]
Jan 27 20:24:29 crc kubenswrapper[4793]: E0127 20:24:29.263571 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d33de24-4f5c-4cff-8da3-7848753edd2a" containerName="mariadb-database-create"
Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.263590 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d33de24-4f5c-4cff-8da3-7848753edd2a" containerName="mariadb-database-create"
Jan 27 20:24:29 crc kubenswrapper[4793]: E0127 20:24:29.263602 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f847ba7a-9d1b-4fbd-8ca3-7d64813b628a" containerName="mariadb-database-create"
Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.263609 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f847ba7a-9d1b-4fbd-8ca3-7d64813b628a" containerName="mariadb-database-create"
Jan 27 20:24:29 crc kubenswrapper[4793]: E0127 20:24:29.263623 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29c672ac-584e-4777-8c08-4f78c6286686" containerName="mariadb-account-create-update"
Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.263629 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="29c672ac-584e-4777-8c08-4f78c6286686" containerName="mariadb-account-create-update"
Jan 27 20:24:29 crc kubenswrapper[4793]: E0127 20:24:29.263643 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a267a0de-2ca0-4324-a99d-faf15e41e8ff" containerName="mariadb-account-create-update"
Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.263649 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="a267a0de-2ca0-4324-a99d-faf15e41e8ff" containerName="mariadb-account-create-update"
Jan 27 20:24:29 crc kubenswrapper[4793]: E0127 20:24:29.263658 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9047e5d3-ba8c-49d3-af56-6f6b3a090759" containerName="mariadb-account-create-update"
Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.263664 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="9047e5d3-ba8c-49d3-af56-6f6b3a090759" containerName="mariadb-account-create-update"
Jan 27 20:24:29 crc kubenswrapper[4793]: E0127 20:24:29.263679 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0ab0f0f-0759-461c-b18e-ca19b1a627e0" containerName="mariadb-account-create-update"
Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.263686 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0ab0f0f-0759-461c-b18e-ca19b1a627e0" containerName="mariadb-account-create-update"
Jan 27 20:24:29 crc kubenswrapper[4793]: E0127 20:24:29.263696 4793 cpu_manager.go:410] "RemoveStaleState:
removing container" podUID="2f32f180-5e5c-4b65-a124-684e2620b221" containerName="init" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.263703 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f32f180-5e5c-4b65-a124-684e2620b221" containerName="init" Jan 27 20:24:29 crc kubenswrapper[4793]: E0127 20:24:29.263716 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f32f180-5e5c-4b65-a124-684e2620b221" containerName="dnsmasq-dns" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.263722 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f32f180-5e5c-4b65-a124-684e2620b221" containerName="dnsmasq-dns" Jan 27 20:24:29 crc kubenswrapper[4793]: E0127 20:24:29.263730 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c2d66d7-1414-4bc8-9131-9af3080a5c4f" containerName="mariadb-database-create" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.263736 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c2d66d7-1414-4bc8-9131-9af3080a5c4f" containerName="mariadb-database-create" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.263959 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="29c672ac-584e-4777-8c08-4f78c6286686" containerName="mariadb-account-create-update" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.263975 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="9047e5d3-ba8c-49d3-af56-6f6b3a090759" containerName="mariadb-account-create-update" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.263994 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="a267a0de-2ca0-4324-a99d-faf15e41e8ff" containerName="mariadb-account-create-update" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.264008 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d33de24-4f5c-4cff-8da3-7848753edd2a" containerName="mariadb-database-create" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.264017 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f847ba7a-9d1b-4fbd-8ca3-7d64813b628a" containerName="mariadb-database-create" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.264031 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f32f180-5e5c-4b65-a124-684e2620b221" containerName="dnsmasq-dns" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.264040 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0ab0f0f-0759-461c-b18e-ca19b1a627e0" containerName="mariadb-account-create-update" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.264052 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c2d66d7-1414-4bc8-9131-9af3080a5c4f" containerName="mariadb-database-create" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.264624 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.267328 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.292035 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-fxmkt-config-8mp5w"] Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.367418 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/891dfd22-93e0-495a-a90c-b880ea7d41f0-additional-scripts\") pod \"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.367661 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-run-ovn\") pod \"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.367727 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-log-ovn\") pod \"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.368014 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-run\") pod \"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.368073 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/891dfd22-93e0-495a-a90c-b880ea7d41f0-scripts\") pod \"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.368147 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7q7d\" (UniqueName: \"kubernetes.io/projected/891dfd22-93e0-495a-a90c-b880ea7d41f0-kube-api-access-h7q7d\") pod \"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.546350 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-run\") pod \"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.546624 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-run\") pod 
\"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.546668 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/891dfd22-93e0-495a-a90c-b880ea7d41f0-scripts\") pod \"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.546701 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7q7d\" (UniqueName: \"kubernetes.io/projected/891dfd22-93e0-495a-a90c-b880ea7d41f0-kube-api-access-h7q7d\") pod \"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.546768 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/891dfd22-93e0-495a-a90c-b880ea7d41f0-additional-scripts\") pod \"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.546866 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-run-ovn\") pod \"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.546899 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-log-ovn\") pod \"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.547048 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-log-ovn\") pod \"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.547476 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-run-ovn\") pod \"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.548008 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/891dfd22-93e0-495a-a90c-b880ea7d41f0-additional-scripts\") pod \"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.551345 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/891dfd22-93e0-495a-a90c-b880ea7d41f0-scripts\") pod 
\"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.583355 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7q7d\" (UniqueName: \"kubernetes.io/projected/891dfd22-93e0-495a-a90c-b880ea7d41f0-kube-api-access-h7q7d\") pod \"ovn-controller-fxmkt-config-8mp5w\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.814019 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f32f180-5e5c-4b65-a124-684e2620b221" path="/var/lib/kubelet/pods/2f32f180-5e5c-4b65-a124-684e2620b221/volumes" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.847263 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7","Type":"ContainerStarted","Data":"642c4da046cb49592c2dbdb5ea15fec53c4d366602f78d9dd9a9adc6392c53a3"} Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.848328 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.881205 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:29 crc kubenswrapper[4793]: I0127 20:24:29.883256 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=-9223371947.971529 podStartE2EDuration="1m28.883246036s" podCreationTimestamp="2026-01-27 20:23:01 +0000 UTC" firstStartedPulling="2026-01-27 20:23:07.205289807 +0000 UTC m=+1212.595542963" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:24:29.882843057 +0000 UTC m=+1295.273096233" watchObservedRunningTime="2026-01-27 20:24:29.883246036 +0000 UTC m=+1295.273499192" Jan 27 20:24:30 crc kubenswrapper[4793]: I0127 20:24:30.282889 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:30 crc kubenswrapper[4793]: I0127 20:24:30.308592 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:30 crc kubenswrapper[4793]: I0127 20:24:30.502563 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-fxmkt-config-8mp5w"] Jan 27 20:24:30 crc kubenswrapper[4793]: I0127 20:24:30.876737 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fxmkt-config-8mp5w" event={"ID":"891dfd22-93e0-495a-a90c-b880ea7d41f0","Type":"ContainerStarted","Data":"80dec2df52bfff2a72389547edcec2995d07197de0586f373127170c6794efc2"} Jan 27 20:24:30 crc kubenswrapper[4793]: I0127 20:24:30.881326 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:31 crc kubenswrapper[4793]: I0127 20:24:31.885956 4793 generic.go:334] "Generic (PLEG): container finished" podID="891dfd22-93e0-495a-a90c-b880ea7d41f0" containerID="6a841516b7c13b7c410bb8772ab161cef66a5a5372bb096a0082970b77e7332d" exitCode=0 Jan 27 20:24:31 crc kubenswrapper[4793]: I0127 20:24:31.886069 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fxmkt-config-8mp5w" 
event={"ID":"891dfd22-93e0-495a-a90c-b880ea7d41f0","Type":"ContainerDied","Data":"6a841516b7c13b7c410bb8772ab161cef66a5a5372bb096a0082970b77e7332d"} Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.628585 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-fxmkt" Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.651279 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.851395 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-run\") pod \"891dfd22-93e0-495a-a90c-b880ea7d41f0\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.851415 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-run" (OuterVolumeSpecName: "var-run") pod "891dfd22-93e0-495a-a90c-b880ea7d41f0" (UID: "891dfd22-93e0-495a-a90c-b880ea7d41f0"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.852229 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-run-ovn\") pod \"891dfd22-93e0-495a-a90c-b880ea7d41f0\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.852400 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7q7d\" (UniqueName: \"kubernetes.io/projected/891dfd22-93e0-495a-a90c-b880ea7d41f0-kube-api-access-h7q7d\") pod \"891dfd22-93e0-495a-a90c-b880ea7d41f0\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.852333 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "891dfd22-93e0-495a-a90c-b880ea7d41f0" (UID: "891dfd22-93e0-495a-a90c-b880ea7d41f0"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.852761 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/891dfd22-93e0-495a-a90c-b880ea7d41f0-scripts\") pod \"891dfd22-93e0-495a-a90c-b880ea7d41f0\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.852942 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-log-ovn\") pod \"891dfd22-93e0-495a-a90c-b880ea7d41f0\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.853040 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/891dfd22-93e0-495a-a90c-b880ea7d41f0-additional-scripts\") pod \"891dfd22-93e0-495a-a90c-b880ea7d41f0\" (UID: \"891dfd22-93e0-495a-a90c-b880ea7d41f0\") " Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.853067 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "891dfd22-93e0-495a-a90c-b880ea7d41f0" (UID: "891dfd22-93e0-495a-a90c-b880ea7d41f0"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.853584 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/891dfd22-93e0-495a-a90c-b880ea7d41f0-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "891dfd22-93e0-495a-a90c-b880ea7d41f0" (UID: "891dfd22-93e0-495a-a90c-b880ea7d41f0"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.853872 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/891dfd22-93e0-495a-a90c-b880ea7d41f0-scripts" (OuterVolumeSpecName: "scripts") pod "891dfd22-93e0-495a-a90c-b880ea7d41f0" (UID: "891dfd22-93e0-495a-a90c-b880ea7d41f0"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.854426 4793 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.854538 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/891dfd22-93e0-495a-a90c-b880ea7d41f0-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.854668 4793 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.854772 4793 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/891dfd22-93e0-495a-a90c-b880ea7d41f0-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.854851 4793 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/891dfd22-93e0-495a-a90c-b880ea7d41f0-var-run\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:33 crc kubenswrapper[4793]: I0127 20:24:33.864057 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/891dfd22-93e0-495a-a90c-b880ea7d41f0-kube-api-access-h7q7d" (OuterVolumeSpecName: "kube-api-access-h7q7d") pod "891dfd22-93e0-495a-a90c-b880ea7d41f0" (UID: "891dfd22-93e0-495a-a90c-b880ea7d41f0"). InnerVolumeSpecName "kube-api-access-h7q7d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:24:34 crc kubenswrapper[4793]: I0127 20:24:34.021918 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7q7d\" (UniqueName: \"kubernetes.io/projected/891dfd22-93e0-495a-a90c-b880ea7d41f0-kube-api-access-h7q7d\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:34 crc kubenswrapper[4793]: I0127 20:24:34.032101 4793 generic.go:334] "Generic (PLEG): container finished" podID="be1afc99-1852-4e3b-a2e7-e9beab138334" containerID="c6abbef47f67e638090a5c00620726d933ca1e66b204ba6c8fb3d01433ffb8aa" exitCode=0 Jan 27 20:24:34 crc kubenswrapper[4793]: I0127 20:24:34.032198 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"be1afc99-1852-4e3b-a2e7-e9beab138334","Type":"ContainerDied","Data":"c6abbef47f67e638090a5c00620726d933ca1e66b204ba6c8fb3d01433ffb8aa"} Jan 27 20:24:34 crc kubenswrapper[4793]: I0127 20:24:34.034676 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fxmkt-config-8mp5w" event={"ID":"891dfd22-93e0-495a-a90c-b880ea7d41f0","Type":"ContainerDied","Data":"80dec2df52bfff2a72389547edcec2995d07197de0586f373127170c6794efc2"} Jan 27 20:24:34 crc kubenswrapper[4793]: I0127 20:24:34.034709 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80dec2df52bfff2a72389547edcec2995d07197de0586f373127170c6794efc2" Jan 27 20:24:34 crc kubenswrapper[4793]: I0127 20:24:34.034849 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-fxmkt-config-8mp5w" Jan 27 20:24:34 crc kubenswrapper[4793]: I0127 20:24:34.822377 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-fxmkt-config-8mp5w"] Jan 27 20:24:34 crc kubenswrapper[4793]: I0127 20:24:34.832296 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-fxmkt-config-8mp5w"] Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.265019 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"be1afc99-1852-4e3b-a2e7-e9beab138334","Type":"ContainerStarted","Data":"43a2774eee9b2655c70f1d1d671e39de4b5a1753a8c3c45cdec6a6d4dcf1f495"} Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.265879 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.397020 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=-9223371945.457785 podStartE2EDuration="1m31.396990288s" podCreationTimestamp="2026-01-27 20:23:04 +0000 UTC" firstStartedPulling="2026-01-27 20:23:10.240581051 +0000 UTC m=+1215.630834207" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:24:35.378501533 +0000 UTC m=+1300.768754689" watchObservedRunningTime="2026-01-27 20:24:35.396990288 +0000 UTC m=+1300.787243444" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.439950 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-fxmkt-config-cs8mq"] Jan 27 20:24:35 crc kubenswrapper[4793]: E0127 20:24:35.440494 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="891dfd22-93e0-495a-a90c-b880ea7d41f0" containerName="ovn-config" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.440518 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="891dfd22-93e0-495a-a90c-b880ea7d41f0" containerName="ovn-config" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.440943 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="891dfd22-93e0-495a-a90c-b880ea7d41f0" containerName="ovn-config" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.442153 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.450093 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.473807 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-fxmkt-config-cs8mq"] Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.544573 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-log-ovn\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.545809 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g469f\" (UniqueName: \"kubernetes.io/projected/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-kube-api-access-g469f\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.545905 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-run\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.545928 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-scripts\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.545968 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-additional-scripts\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.546070 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-run-ovn\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.649744 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-log-ovn\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.649814 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g469f\" (UniqueName: 
\"kubernetes.io/projected/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-kube-api-access-g469f\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.649868 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-run\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.649883 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-scripts\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.649909 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-additional-scripts\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.649961 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-run-ovn\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.650270 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-run-ovn\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.650364 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-log-ovn\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.650788 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-run\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.653543 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-scripts\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.653685 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: 
\"kubernetes.io/configmap/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-additional-scripts\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.702526 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g469f\" (UniqueName: \"kubernetes.io/projected/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-kube-api-access-g469f\") pod \"ovn-controller-fxmkt-config-cs8mq\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") " pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.774397 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-fxmkt-config-cs8mq" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.818571 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="891dfd22-93e0-495a-a90c-b880ea7d41f0" path="/var/lib/kubelet/pods/891dfd22-93e0-495a-a90c-b880ea7d41f0/volumes" Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.861892 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.862408 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerName="prometheus" containerID="cri-o://f28c44283d04df364885d9029074b7e6b250419350041bf36500e618101a284e" gracePeriod=600 Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.863216 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerName="thanos-sidecar" containerID="cri-o://9780dab57b81cbb94d3c418e6f1865a3cfde246b0c2e32c0c1cf812c5f677dd8" gracePeriod=600 Jan 27 20:24:35 crc kubenswrapper[4793]: I0127 20:24:35.863299 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerName="config-reloader" containerID="cri-o://d3bd107fee0835adeafd44074ea03d8907cd9432884fed26cfdca79e2cf69aee" gracePeriod=600 Jan 27 20:24:36 crc kubenswrapper[4793]: I0127 20:24:36.279350 4793 generic.go:334] "Generic (PLEG): container finished" podID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerID="9780dab57b81cbb94d3c418e6f1865a3cfde246b0c2e32c0c1cf812c5f677dd8" exitCode=0 Jan 27 20:24:36 crc kubenswrapper[4793]: I0127 20:24:36.279872 4793 generic.go:334] "Generic (PLEG): container finished" podID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerID="f28c44283d04df364885d9029074b7e6b250419350041bf36500e618101a284e" exitCode=0 Jan 27 20:24:36 crc kubenswrapper[4793]: I0127 20:24:36.279470 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a290e10d-fe47-4a6c-bb5c-e5306fefd090","Type":"ContainerDied","Data":"9780dab57b81cbb94d3c418e6f1865a3cfde246b0c2e32c0c1cf812c5f677dd8"} Jan 27 20:24:36 crc kubenswrapper[4793]: I0127 20:24:36.280026 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a290e10d-fe47-4a6c-bb5c-e5306fefd090","Type":"ContainerDied","Data":"f28c44283d04df364885d9029074b7e6b250419350041bf36500e618101a284e"} Jan 27 20:24:36 crc kubenswrapper[4793]: I0127 20:24:36.544185 4793 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack/ovn-controller-fxmkt-config-cs8mq"] Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.298115 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fxmkt-config-cs8mq" event={"ID":"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54","Type":"ContainerStarted","Data":"8dc3a754e1e48f0ce5c63319685061290592009e62276a55f05fff31da8b1562"} Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.298580 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fxmkt-config-cs8mq" event={"ID":"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54","Type":"ContainerStarted","Data":"c161cb51cb63c85fb826249d1b07cb6b1d85054e5b56d18ff614f01edd1cfd21"} Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.306451 4793 generic.go:334] "Generic (PLEG): container finished" podID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerID="d3bd107fee0835adeafd44074ea03d8907cd9432884fed26cfdca79e2cf69aee" exitCode=0 Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.306497 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a290e10d-fe47-4a6c-bb5c-e5306fefd090","Type":"ContainerDied","Data":"d3bd107fee0835adeafd44074ea03d8907cd9432884fed26cfdca79e2cf69aee"} Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.332781 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-fxmkt-config-cs8mq" podStartSLOduration=2.332762293 podStartE2EDuration="2.332762293s" podCreationTimestamp="2026-01-27 20:24:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:24:37.327580973 +0000 UTC m=+1302.717834129" watchObservedRunningTime="2026-01-27 20:24:37.332762293 +0000 UTC m=+1302.723015469" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.900850 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.920186 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-0\") pod \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.920361 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a290e10d-fe47-4a6c-bb5c-e5306fefd090-tls-assets\") pod \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.920398 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-config\") pod \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.920447 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a290e10d-fe47-4a6c-bb5c-e5306fefd090-config-out\") pod \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.920762 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") pod \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.920804 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-thanos-prometheus-http-client-file\") pod \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.920916 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-web-config\") pod \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.920991 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-1\") pod \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.921042 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5hnc\" (UniqueName: \"kubernetes.io/projected/a290e10d-fe47-4a6c-bb5c-e5306fefd090-kube-api-access-w5hnc\") pod \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.921312 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "a290e10d-fe47-4a6c-bb5c-e5306fefd090" (UID: "a290e10d-fe47-4a6c-bb5c-e5306fefd090"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.924962 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "a290e10d-fe47-4a6c-bb5c-e5306fefd090" (UID: "a290e10d-fe47-4a6c-bb5c-e5306fefd090"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.925142 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-2\") pod \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\" (UID: \"a290e10d-fe47-4a6c-bb5c-e5306fefd090\") " Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.928092 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a290e10d-fe47-4a6c-bb5c-e5306fefd090-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "a290e10d-fe47-4a6c-bb5c-e5306fefd090" (UID: "a290e10d-fe47-4a6c-bb5c-e5306fefd090"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.930467 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "a290e10d-fe47-4a6c-bb5c-e5306fefd090" (UID: "a290e10d-fe47-4a6c-bb5c-e5306fefd090"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.942304 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "a290e10d-fe47-4a6c-bb5c-e5306fefd090" (UID: "a290e10d-fe47-4a6c-bb5c-e5306fefd090"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.946833 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a290e10d-fe47-4a6c-bb5c-e5306fefd090-kube-api-access-w5hnc" (OuterVolumeSpecName: "kube-api-access-w5hnc") pod "a290e10d-fe47-4a6c-bb5c-e5306fefd090" (UID: "a290e10d-fe47-4a6c-bb5c-e5306fefd090"). InnerVolumeSpecName "kube-api-access-w5hnc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.946935 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a290e10d-fe47-4a6c-bb5c-e5306fefd090-config-out" (OuterVolumeSpecName: "config-out") pod "a290e10d-fe47-4a6c-bb5c-e5306fefd090" (UID: "a290e10d-fe47-4a6c-bb5c-e5306fefd090"). 
InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.947022 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-config" (OuterVolumeSpecName: "config") pod "a290e10d-fe47-4a6c-bb5c-e5306fefd090" (UID: "a290e10d-fe47-4a6c-bb5c-e5306fefd090"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.947853 4793 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.947874 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5hnc\" (UniqueName: \"kubernetes.io/projected/a290e10d-fe47-4a6c-bb5c-e5306fefd090-kube-api-access-w5hnc\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.947884 4793 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.947894 4793 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a290e10d-fe47-4a6c-bb5c-e5306fefd090-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.947904 4793 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a290e10d-fe47-4a6c-bb5c-e5306fefd090-tls-assets\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.947914 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.947930 4793 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a290e10d-fe47-4a6c-bb5c-e5306fefd090-config-out\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.947965 4793 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:37 crc kubenswrapper[4793]: I0127 20:24:37.968077 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-web-config" (OuterVolumeSpecName: "web-config") pod "a290e10d-fe47-4a6c-bb5c-e5306fefd090" (UID: "a290e10d-fe47-4a6c-bb5c-e5306fefd090"). InnerVolumeSpecName "web-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.038352 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "a290e10d-fe47-4a6c-bb5c-e5306fefd090" (UID: "a290e10d-fe47-4a6c-bb5c-e5306fefd090"). InnerVolumeSpecName "pvc-a92de854-8671-4eba-9b5d-1a749083f30b". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.050083 4793 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-a92de854-8671-4eba-9b5d-1a749083f30b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") on node \"crc\" " Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.050123 4793 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a290e10d-fe47-4a6c-bb5c-e5306fefd090-web-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.071695 4793 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.072042 4793 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-a92de854-8671-4eba-9b5d-1a749083f30b" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b") on node "crc" Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.152104 4793 reconciler_common.go:293] "Volume detached for volume \"pvc-a92de854-8671-4eba-9b5d-1a749083f30b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.579584 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a290e10d-fe47-4a6c-bb5c-e5306fefd090","Type":"ContainerDied","Data":"95b8c9d6e3750a3e5652347263911dfc1d993ab9444ada3e03519c7300c56696"} Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.580835 4793 scope.go:117] "RemoveContainer" containerID="9780dab57b81cbb94d3c418e6f1865a3cfde246b0c2e32c0c1cf812c5f677dd8" Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.581061 4793 util.go:48] "No ready sandbox for pod can be found. 
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.666039 4793 generic.go:334] "Generic (PLEG): container finished" podID="2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54" containerID="8dc3a754e1e48f0ce5c63319685061290592009e62276a55f05fff31da8b1562" exitCode=0
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.666214 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fxmkt-config-cs8mq" event={"ID":"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54","Type":"ContainerDied","Data":"8dc3a754e1e48f0ce5c63319685061290592009e62276a55f05fff31da8b1562"}
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.722789 4793 generic.go:334] "Generic (PLEG): container finished" podID="f5ddc141-eae8-4a4c-b118-a79a9276cf33" containerID="4a916b4844ef44b3ff72e48eb0693c696ad53c40953c04cc6734caf0998de5bf" exitCode=0
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.722916 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.723054 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-2m2n7" event={"ID":"f5ddc141-eae8-4a4c-b118-a79a9276cf33","Type":"ContainerDied","Data":"4a916b4844ef44b3ff72e48eb0693c696ad53c40953c04cc6734caf0998de5bf"}
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.753657 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.772767 4793 scope.go:117] "RemoveContainer" containerID="d3bd107fee0835adeafd44074ea03d8907cd9432884fed26cfdca79e2cf69aee"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.802635 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 27 20:24:38 crc kubenswrapper[4793]: E0127 20:24:38.803311 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerName="config-reloader"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.803341 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerName="config-reloader"
Jan 27 20:24:38 crc kubenswrapper[4793]: E0127 20:24:38.803357 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerName="init-config-reloader"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.803368 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerName="init-config-reloader"
Jan 27 20:24:38 crc kubenswrapper[4793]: E0127 20:24:38.803381 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerName="thanos-sidecar"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.803390 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerName="thanos-sidecar"
Jan 27 20:24:38 crc kubenswrapper[4793]: E0127 20:24:38.803416 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerName="prometheus"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.803424 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerName="prometheus"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.803679 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerName="config-reloader"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.803703 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerName="prometheus"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.803716 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" containerName="thanos-sidecar"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.806028 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.833941 4793 scope.go:117] "RemoveContainer" containerID="f28c44283d04df364885d9029074b7e6b250419350041bf36500e618101a284e"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.838642 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.862072 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.862435 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.862617 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.862770 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.862912 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.863917 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-h8vj4"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.864177 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.864311 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Jan 27 20:24:38 crc kubenswrapper[4793]: I0127 20:24:38.867187 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.164079 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.164160 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7079beed-67e6-40c8-b8fe-b482c61c0ee5-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0"
pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.164191 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.164248 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.164272 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-config\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.164296 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.164316 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7079beed-67e6-40c8-b8fe-b482c61c0ee5-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.164355 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.164413 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.164450 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" 
Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.164489 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.164519 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a92de854-8671-4eba-9b5d-1a749083f30b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.164578 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvmhw\" (UniqueName: \"kubernetes.io/projected/7079beed-67e6-40c8-b8fe-b482c61c0ee5-kube-api-access-fvmhw\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.266828 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvmhw\" (UniqueName: \"kubernetes.io/projected/7079beed-67e6-40c8-b8fe-b482c61c0ee5-kube-api-access-fvmhw\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.267652 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.268587 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7079beed-67e6-40c8-b8fe-b482c61c0ee5-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.268675 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.268700 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.268719 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-config\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.268743 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.268763 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7079beed-67e6-40c8-b8fe-b482c61c0ee5-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.268804 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.268854 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.268893 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.268931 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.268958 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a92de854-8671-4eba-9b5d-1a749083f30b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.273088 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " 
pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.273100 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.273569 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.274476 4793 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.274520 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a92de854-8671-4eba-9b5d-1a749083f30b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8312f000923a56e203c1e13376862fe23daea3f78fa537b754b51784691fd00c/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.276066 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7079beed-67e6-40c8-b8fe-b482c61c0ee5-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.283170 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-config\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.284404 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.284976 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.285008 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7079beed-67e6-40c8-b8fe-b482c61c0ee5-config-out\") pod \"prometheus-metric-storage-0\" (UID: 
\"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.287391 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.288265 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.290211 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvmhw\" (UniqueName: \"kubernetes.io/projected/7079beed-67e6-40c8-b8fe-b482c61c0ee5-kube-api-access-fvmhw\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.291515 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.377024 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a92de854-8671-4eba-9b5d-1a749083f30b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") pod \"prometheus-metric-storage-0\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.433171 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.597528 4793 scope.go:117] "RemoveContainer" containerID="2f5bc73680dbd5086bded0a32b53b08a40c7302f3f3ec85df280dc0f533103f9" Jan 27 20:24:39 crc kubenswrapper[4793]: I0127 20:24:39.828135 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a290e10d-fe47-4a6c-bb5c-e5306fefd090" path="/var/lib/kubelet/pods/a290e10d-fe47-4a6c-bb5c-e5306fefd090/volumes" Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.125414 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.211002 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-2m2n7" Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.244691 4793 util.go:48] "No ready sandbox for pod can be found. 
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.291114 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-swiftconf\") pod \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") "
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.291168 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f5ddc141-eae8-4a4c-b118-a79a9276cf33-ring-data-devices\") pod \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") "
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.291216 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dpdvp\" (UniqueName: \"kubernetes.io/projected/f5ddc141-eae8-4a4c-b118-a79a9276cf33-kube-api-access-dpdvp\") pod \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") "
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.291249 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f5ddc141-eae8-4a4c-b118-a79a9276cf33-scripts\") pod \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") "
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.291354 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f5ddc141-eae8-4a4c-b118-a79a9276cf33-etc-swift\") pod \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") "
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.291401 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-dispersionconf\") pod \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") "
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.291510 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-combined-ca-bundle\") pod \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\" (UID: \"f5ddc141-eae8-4a4c-b118-a79a9276cf33\") "
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.292850 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5ddc141-eae8-4a4c-b118-a79a9276cf33-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "f5ddc141-eae8-4a4c-b118-a79a9276cf33" (UID: "f5ddc141-eae8-4a4c-b118-a79a9276cf33"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.300811 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f5ddc141-eae8-4a4c-b118-a79a9276cf33-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "f5ddc141-eae8-4a4c-b118-a79a9276cf33" (UID: "f5ddc141-eae8-4a4c-b118-a79a9276cf33"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.300998 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5ddc141-eae8-4a4c-b118-a79a9276cf33-kube-api-access-dpdvp" (OuterVolumeSpecName: "kube-api-access-dpdvp") pod "f5ddc141-eae8-4a4c-b118-a79a9276cf33" (UID: "f5ddc141-eae8-4a4c-b118-a79a9276cf33"). InnerVolumeSpecName "kube-api-access-dpdvp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.303487 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "f5ddc141-eae8-4a4c-b118-a79a9276cf33" (UID: "f5ddc141-eae8-4a4c-b118-a79a9276cf33"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.325994 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f5ddc141-eae8-4a4c-b118-a79a9276cf33" (UID: "f5ddc141-eae8-4a4c-b118-a79a9276cf33"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.330038 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "f5ddc141-eae8-4a4c-b118-a79a9276cf33" (UID: "f5ddc141-eae8-4a4c-b118-a79a9276cf33"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.339318 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f5ddc141-eae8-4a4c-b118-a79a9276cf33-scripts" (OuterVolumeSpecName: "scripts") pod "f5ddc141-eae8-4a4c-b118-a79a9276cf33" (UID: "f5ddc141-eae8-4a4c-b118-a79a9276cf33"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.391398 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-notifications-server-0" podUID="d13b401d-8f36-4677-b782-ebf9a3d5daab" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.110:5671: connect: connection refused"
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.392938 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-scripts\") pod \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") "
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.393019 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-run\") pod \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") "
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.393115 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-log-ovn\") pod \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") "
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.393224 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-run" (OuterVolumeSpecName: "var-run") pod "2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54" (UID: "2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.393292 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54" (UID: "2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.393426 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g469f\" (UniqueName: \"kubernetes.io/projected/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-kube-api-access-g469f\") pod \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") "
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.394031 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-scripts" (OuterVolumeSpecName: "scripts") pod "2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54" (UID: "2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.394139 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-run-ovn\") pod \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") "
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.394208 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-additional-scripts\") pod \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\" (UID: \"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54\") "
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.394609 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f5ddc141-eae8-4a4c-b118-a79a9276cf33-scripts\") on node \"crc\" DevicePath \"\""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.394629 4793 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f5ddc141-eae8-4a4c-b118-a79a9276cf33-etc-swift\") on node \"crc\" DevicePath \"\""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.394638 4793 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-dispersionconf\") on node \"crc\" DevicePath \"\""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.394648 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-scripts\") on node \"crc\" DevicePath \"\""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.394656 4793 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-run\") on node \"crc\" DevicePath \"\""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.394666 4793 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-log-ovn\") on node \"crc\" DevicePath \"\""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.394675 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.394683 4793 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f5ddc141-eae8-4a4c-b118-a79a9276cf33-swiftconf\") on node \"crc\" DevicePath \"\""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.394692 4793 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f5ddc141-eae8-4a4c-b118-a79a9276cf33-ring-data-devices\") on node \"crc\" DevicePath \"\""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.394700 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dpdvp\" (UniqueName: \"kubernetes.io/projected/f5ddc141-eae8-4a4c-b118-a79a9276cf33-kube-api-access-dpdvp\") on node \"crc\" DevicePath \"\""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.395096 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54" (UID: "2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.395127 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54" (UID: "2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.397839 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-kube-api-access-g469f" (OuterVolumeSpecName: "kube-api-access-g469f") pod "2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54" (UID: "2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54"). InnerVolumeSpecName "kube-api-access-g469f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.496535 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g469f\" (UniqueName: \"kubernetes.io/projected/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-kube-api-access-g469f\") on node \"crc\" DevicePath \"\""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.496803 4793 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-var-run-ovn\") on node \"crc\" DevicePath \"\""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.496813 4793 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54-additional-scripts\") on node \"crc\" DevicePath \"\""
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.748923 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-2m2n7" event={"ID":"f5ddc141-eae8-4a4c-b118-a79a9276cf33","Type":"ContainerDied","Data":"397716be4920956a502ac8c6bd2971932d259f6024900cbc105aaa401ad0cc27"}
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.748962 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="397716be4920956a502ac8c6bd2971932d259f6024900cbc105aaa401ad0cc27"
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.748968 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-2m2n7"
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.751250 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7079beed-67e6-40c8-b8fe-b482c61c0ee5","Type":"ContainerStarted","Data":"befe29c51bc75645a18d0f70ec4136e3de44b5e67e7201c399ae500cc75092a7"}
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.753240 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fxmkt-config-cs8mq" event={"ID":"2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54","Type":"ContainerDied","Data":"c161cb51cb63c85fb826249d1b07cb6b1d85054e5b56d18ff614f01edd1cfd21"}
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.753274 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c161cb51cb63c85fb826249d1b07cb6b1d85054e5b56d18ff614f01edd1cfd21"
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.753334 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-fxmkt-config-cs8mq"
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.838682 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-fxmkt-config-cs8mq"]
Jan 27 20:24:40 crc kubenswrapper[4793]: I0127 20:24:40.845532 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-fxmkt-config-cs8mq"]
Jan 27 20:24:41 crc kubenswrapper[4793]: I0127 20:24:41.813859 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54" path="/var/lib/kubelet/pods/2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54/volumes"
Jan 27 20:24:43 crc kubenswrapper[4793]: I0127 20:24:43.777222 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7079beed-67e6-40c8-b8fe-b482c61c0ee5","Type":"ContainerStarted","Data":"4c1a3bf5c396e29ae30cb20590b0929da652bede8fde41fa9b99919957cf9d28"}
Jan 27 20:24:44 crc kubenswrapper[4793]: I0127 20:24:44.580363 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.104:5671: connect: connection refused"
Jan 27 20:24:44 crc kubenswrapper[4793]: I0127 20:24:44.877784 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0"
Jan 27 20:24:44 crc kubenswrapper[4793]: I0127 20:24:44.894533 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6a954bdd-89aa-4d5c-8034-5c8ed27e8652-etc-swift\") pod \"swift-storage-0\" (UID: \"6a954bdd-89aa-4d5c-8034-5c8ed27e8652\") " pod="openstack/swift-storage-0"
Jan 27 20:24:45 crc kubenswrapper[4793]: I0127 20:24:45.106230 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Jan 27 20:24:45 crc kubenswrapper[4793]: I0127 20:24:45.663374 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"]
Jan 27 20:24:45 crc kubenswrapper[4793]: I0127 20:24:45.794480 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6a954bdd-89aa-4d5c-8034-5c8ed27e8652","Type":"ContainerStarted","Data":"abf9982c2fcebbc96e5e856ffa76dd3a3e02f83a97bfa4ce8fb5a6a2d2f62df0"}
Jan 27 20:24:47 crc kubenswrapper[4793]: I0127 20:24:47.028848 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="be1afc99-1852-4e3b-a2e7-e9beab138334" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused"
Jan 27 20:24:48 crc kubenswrapper[4793]: I0127 20:24:48.823398 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6a954bdd-89aa-4d5c-8034-5c8ed27e8652","Type":"ContainerStarted","Data":"bc18447f4801220bb6364da5d97c592cb56f300bb352773110ead25ecd7b7601"}
Jan 27 20:24:48 crc kubenswrapper[4793]: I0127 20:24:48.823907 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6a954bdd-89aa-4d5c-8034-5c8ed27e8652","Type":"ContainerStarted","Data":"b6f7e8c523b21cfa50179babdc7fdb3bd45e9b9562b850f2564487b103165137"}
Jan 27 20:24:48 crc kubenswrapper[4793]: I0127 20:24:48.823920 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6a954bdd-89aa-4d5c-8034-5c8ed27e8652","Type":"ContainerStarted","Data":"0c345a4eed2845d4279d33e236259e046f152716e5035de74ddb918e1ad4aecc"}
Jan 27 20:24:50 crc kubenswrapper[4793]: I0127 20:24:50.101462 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6a954bdd-89aa-4d5c-8034-5c8ed27e8652","Type":"ContainerStarted","Data":"e2f8f6370c436ab57eaace23826300b6aaf9414a74f502299769f89cc8f81410"}
Jan 27 20:24:50 crc kubenswrapper[4793]: I0127 20:24:50.102861 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6a954bdd-89aa-4d5c-8034-5c8ed27e8652","Type":"ContainerStarted","Data":"50bff0ad8527b4e0f8fecfa372c4d1e396b9025c5453b932118c6274aa262144"}
Jan 27 20:24:50 crc kubenswrapper[4793]: I0127 20:24:50.102914 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6a954bdd-89aa-4d5c-8034-5c8ed27e8652","Type":"ContainerStarted","Data":"4412ca836a5184224a31b6bc06f768d2b114b344298ec5bf6d1677ced666d9ee"}
Jan 27 20:24:50 crc kubenswrapper[4793]: I0127 20:24:50.392757 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-notifications-server-0"
Jan 27 20:24:51 crc kubenswrapper[4793]: I0127 20:24:51.112816 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6a954bdd-89aa-4d5c-8034-5c8ed27e8652","Type":"ContainerStarted","Data":"9e36a5eb8f0ad94dd4761ff7b222c971ef92443fa0b1ad859c6f04eed76d3a64"}
Jan 27 20:24:51 crc kubenswrapper[4793]: I0127 20:24:51.113150 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6a954bdd-89aa-4d5c-8034-5c8ed27e8652","Type":"ContainerStarted","Data":"2db8fcfd0ee3591ecd31bf82f56195003ef0835c6697f1dcd639e1b59be20e97"}
Jan 27 20:24:51 crc kubenswrapper[4793]: I0127 20:24:51.114538 4793 generic.go:334] "Generic (PLEG): container finished" podID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerID="4c1a3bf5c396e29ae30cb20590b0929da652bede8fde41fa9b99919957cf9d28" exitCode=0
Jan 27 20:24:51 crc kubenswrapper[4793]: I0127 20:24:51.114585 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7079beed-67e6-40c8-b8fe-b482c61c0ee5","Type":"ContainerDied","Data":"4c1a3bf5c396e29ae30cb20590b0929da652bede8fde41fa9b99919957cf9d28"}
Jan 27 20:24:52 crc kubenswrapper[4793]: I0127 20:24:52.128479 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7079beed-67e6-40c8-b8fe-b482c61c0ee5","Type":"ContainerStarted","Data":"7d36850131a63c8578cfc954d30b5b5af3261ba1543f9c3998557f9d1b782589"}
Jan 27 20:24:53 crc kubenswrapper[4793]: I0127 20:24:53.145704 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6a954bdd-89aa-4d5c-8034-5c8ed27e8652","Type":"ContainerStarted","Data":"af82e076fd8eebfa57a5b5cc3fc3b56efa1653e856d6c1cf5c1d56bdb02f1213"}
Jan 27 20:24:53 crc kubenswrapper[4793]: I0127 20:24:53.146019 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6a954bdd-89aa-4d5c-8034-5c8ed27e8652","Type":"ContainerStarted","Data":"5e0c28c2e5cea6d86d14af9056133d2048c7e2dd857a32e189b2b71f5dcc0700"}
Jan 27 20:24:53 crc kubenswrapper[4793]: I0127 20:24:53.146037 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6a954bdd-89aa-4d5c-8034-5c8ed27e8652","Type":"ContainerStarted","Data":"32332f89f2be2d3f22b4cea5e70784f0c05a0d83f1c4260af65b36a9862b250a"}
Jan 27 20:24:53 crc kubenswrapper[4793]: I0127 20:24:53.146049 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6a954bdd-89aa-4d5c-8034-5c8ed27e8652","Type":"ContainerStarted","Data":"a3c207db85a79ada5db5c4b85e4a629f641a6894b2bd96c449fb7a399f6fed21"}
Jan 27 20:24:54 crc kubenswrapper[4793]: I0127 20:24:54.259705 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6a954bdd-89aa-4d5c-8034-5c8ed27e8652","Type":"ContainerStarted","Data":"500c709215c54f6e72a03f7e5d9f9b863920889b44f8597b7d8d06123050683a"}
Jan 27 20:24:54 crc kubenswrapper[4793]: I0127 20:24:54.260160 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6a954bdd-89aa-4d5c-8034-5c8ed27e8652","Type":"ContainerStarted","Data":"5d4a5aa55011915fc27c71deb58121d3990b50f83eb10d8d60225670e9907795"}
Jan 27 20:24:54 crc kubenswrapper[4793]: I0127 20:24:54.578613 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.104:5671: connect: connection refused"
Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.275585 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6a954bdd-89aa-4d5c-8034-5c8ed27e8652","Type":"ContainerStarted","Data":"9615bacbb5b84124b6c9b0e5bc2be9157ecf6d9ece10925441d4a07e676d6d78"}
Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.288084 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7079beed-67e6-40c8-b8fe-b482c61c0ee5","Type":"ContainerStarted","Data":"31bce5819a13bec1ecd73bfb99e884ed3aac050dd87958abc11a1b61cbbfd5be"}
Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.340892 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=37.937340217 podStartE2EDuration="44.340867917s" podCreationTimestamp="2026-01-27 20:24:11 +0000 UTC" firstStartedPulling="2026-01-27 20:24:45.664372474 +0000 UTC m=+1311.054625630" lastFinishedPulling="2026-01-27 20:24:52.067900174 +0000 UTC m=+1317.458153330" observedRunningTime="2026-01-27 20:24:55.336767114 +0000 UTC m=+1320.727020280" watchObservedRunningTime="2026-01-27 20:24:55.340867917 +0000 UTC m=+1320.731121073"
Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.469448 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-5mzxt"]
Jan 27 20:24:55 crc kubenswrapper[4793]: E0127 20:24:55.469889 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54" containerName="ovn-config"
Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.469914 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54" containerName="ovn-config"
Jan 27 20:24:55 crc kubenswrapper[4793]: E0127 20:24:55.469950 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5ddc141-eae8-4a4c-b118-a79a9276cf33" containerName="swift-ring-rebalance"
Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.469959 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5ddc141-eae8-4a4c-b118-a79a9276cf33" containerName="swift-ring-rebalance"
Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.470165 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5ddc141-eae8-4a4c-b118-a79a9276cf33" containerName="swift-ring-rebalance"
Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.470191 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b6baeb7-8f05-4f9d-8986-0dcaa99d9f54" containerName="ovn-config"
Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.470902 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5mzxt"
Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.481529 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-5mzxt"]
Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.515079 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6e54982-02cd-4003-8bf4-e10dfade4061-operator-scripts\") pod \"glance-db-create-5mzxt\" (UID: \"f6e54982-02cd-4003-8bf4-e10dfade4061\") " pod="openstack/glance-db-create-5mzxt"
Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.515176 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5q22\" (UniqueName: \"kubernetes.io/projected/f6e54982-02cd-4003-8bf4-e10dfade4061-kube-api-access-s5q22\") pod \"glance-db-create-5mzxt\" (UID: \"f6e54982-02cd-4003-8bf4-e10dfade4061\") " pod="openstack/glance-db-create-5mzxt"
Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.570455 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-ee28-account-create-update-rdbqh"]
Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.571565 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-ee28-account-create-update-rdbqh"
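[editor's note] The pod_startup_latency_tracker line for swift-storage-0 encodes a simple relationship: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that figure minus the time spent pulling images (lastFinishedPulling minus firstStartedPulling). Recomputing it from the timestamps in the line itself, as a sketch:

    package main

    import (
    	"fmt"
    	"time"
    )

    // Recomputes the swift-storage-0 figures from the pod_startup_latency_tracker
    // line above: the SLO duration is the end-to-end startup duration minus the
    // image-pull time. Timestamps are copied verbatim from the log.
    func main() {
    	parse := func(s string) time.Time {
    		t, err := time.Parse(time.RFC3339Nano, s)
    		if err != nil {
    			panic(err)
    		}
    		return t
    	}
    	created := parse("2026-01-27T20:24:11Z")
    	running := parse("2026-01-27T20:24:55.340867917Z")
    	pullStart := parse("2026-01-27T20:24:45.664372474Z")
    	pullEnd := parse("2026-01-27T20:24:52.067900174Z")

    	e2e := running.Sub(created)         // podStartE2EDuration = 44.340867917s
    	slo := e2e - pullEnd.Sub(pullStart) // podStartSLOduration = 37.937340217s
    	fmt.Println(e2e, slo)
    }

The arithmetic matches the logged values: 44.340867917s minus a 6.4035277s pull window gives the reported 37.937340217s SLO duration.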
Need to start a new one" pod="openstack/glance-ee28-account-create-update-rdbqh" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.575095 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.617258 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46gnj\" (UniqueName: \"kubernetes.io/projected/efa4e39e-3cd2-4c39-bc58-b257b5bf95a7-kube-api-access-46gnj\") pod \"glance-ee28-account-create-update-rdbqh\" (UID: \"efa4e39e-3cd2-4c39-bc58-b257b5bf95a7\") " pod="openstack/glance-ee28-account-create-update-rdbqh" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.617344 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/efa4e39e-3cd2-4c39-bc58-b257b5bf95a7-operator-scripts\") pod \"glance-ee28-account-create-update-rdbqh\" (UID: \"efa4e39e-3cd2-4c39-bc58-b257b5bf95a7\") " pod="openstack/glance-ee28-account-create-update-rdbqh" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.617397 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6e54982-02cd-4003-8bf4-e10dfade4061-operator-scripts\") pod \"glance-db-create-5mzxt\" (UID: \"f6e54982-02cd-4003-8bf4-e10dfade4061\") " pod="openstack/glance-db-create-5mzxt" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.617508 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5q22\" (UniqueName: \"kubernetes.io/projected/f6e54982-02cd-4003-8bf4-e10dfade4061-kube-api-access-s5q22\") pod \"glance-db-create-5mzxt\" (UID: \"f6e54982-02cd-4003-8bf4-e10dfade4061\") " pod="openstack/glance-db-create-5mzxt" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.618870 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6e54982-02cd-4003-8bf4-e10dfade4061-operator-scripts\") pod \"glance-db-create-5mzxt\" (UID: \"f6e54982-02cd-4003-8bf4-e10dfade4061\") " pod="openstack/glance-db-create-5mzxt" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.628969 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-ee28-account-create-update-rdbqh"] Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.646824 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5q22\" (UniqueName: \"kubernetes.io/projected/f6e54982-02cd-4003-8bf4-e10dfade4061-kube-api-access-s5q22\") pod \"glance-db-create-5mzxt\" (UID: \"f6e54982-02cd-4003-8bf4-e10dfade4061\") " pod="openstack/glance-db-create-5mzxt" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.718264 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/efa4e39e-3cd2-4c39-bc58-b257b5bf95a7-operator-scripts\") pod \"glance-ee28-account-create-update-rdbqh\" (UID: \"efa4e39e-3cd2-4c39-bc58-b257b5bf95a7\") " pod="openstack/glance-ee28-account-create-update-rdbqh" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.718451 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46gnj\" (UniqueName: \"kubernetes.io/projected/efa4e39e-3cd2-4c39-bc58-b257b5bf95a7-kube-api-access-46gnj\") pod 
\"glance-ee28-account-create-update-rdbqh\" (UID: \"efa4e39e-3cd2-4c39-bc58-b257b5bf95a7\") " pod="openstack/glance-ee28-account-create-update-rdbqh" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.718989 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/efa4e39e-3cd2-4c39-bc58-b257b5bf95a7-operator-scripts\") pod \"glance-ee28-account-create-update-rdbqh\" (UID: \"efa4e39e-3cd2-4c39-bc58-b257b5bf95a7\") " pod="openstack/glance-ee28-account-create-update-rdbqh" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.739388 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46gnj\" (UniqueName: \"kubernetes.io/projected/efa4e39e-3cd2-4c39-bc58-b257b5bf95a7-kube-api-access-46gnj\") pod \"glance-ee28-account-create-update-rdbqh\" (UID: \"efa4e39e-3cd2-4c39-bc58-b257b5bf95a7\") " pod="openstack/glance-ee28-account-create-update-rdbqh" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.794611 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-76976dd897-x6b4b"] Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.837254 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5mzxt" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.839722 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.846399 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.887489 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-ee28-account-create-update-rdbqh" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.905864 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76976dd897-x6b4b"] Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.938994 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-config\") pod \"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.939104 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvpx2\" (UniqueName: \"kubernetes.io/projected/2c367af8-d76d-4cbc-8fa4-729babd56421-kube-api-access-zvpx2\") pod \"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.939196 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-ovsdbserver-nb\") pod \"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.939215 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-ovsdbserver-sb\") pod 
\"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.939243 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-dns-swift-storage-0\") pod \"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:55 crc kubenswrapper[4793]: I0127 20:24:55.939266 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-dns-svc\") pod \"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.040719 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-ovsdbserver-nb\") pod \"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.040777 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-ovsdbserver-sb\") pod \"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.040812 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-dns-swift-storage-0\") pod \"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.040854 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-dns-svc\") pod \"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.040954 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-config\") pod \"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.040993 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvpx2\" (UniqueName: \"kubernetes.io/projected/2c367af8-d76d-4cbc-8fa4-729babd56421-kube-api-access-zvpx2\") pod \"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.041943 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-ovsdbserver-sb\") pod \"dnsmasq-dns-76976dd897-x6b4b\" 
(UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.042111 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-dns-svc\") pod \"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.044294 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-dns-swift-storage-0\") pod \"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.044685 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-config\") pod \"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.045385 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-ovsdbserver-nb\") pod \"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.122907 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvpx2\" (UniqueName: \"kubernetes.io/projected/2c367af8-d76d-4cbc-8fa4-729babd56421-kube-api-access-zvpx2\") pod \"dnsmasq-dns-76976dd897-x6b4b\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.192300 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.337360 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7079beed-67e6-40c8-b8fe-b482c61c0ee5","Type":"ContainerStarted","Data":"9b81fa38a969610a815769431922e292c17ec191f0c81c0f15e528dbeb35342e"} Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.440483 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=18.440458962 podStartE2EDuration="18.440458962s" podCreationTimestamp="2026-01-27 20:24:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:24:56.429441114 +0000 UTC m=+1321.819694270" watchObservedRunningTime="2026-01-27 20:24:56.440458962 +0000 UTC m=+1321.830712118" Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.609742 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-ee28-account-create-update-rdbqh"] Jan 27 20:24:56 crc kubenswrapper[4793]: W0127 20:24:56.628447 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podefa4e39e_3cd2_4c39_bc58_b257b5bf95a7.slice/crio-06bd0a86c0c657b14d978e1b27fd75d10744fc8d6de152afaf646ced79a5113e WatchSource:0}: Error finding container 06bd0a86c0c657b14d978e1b27fd75d10744fc8d6de152afaf646ced79a5113e: Status 404 returned error can't find the container with id 06bd0a86c0c657b14d978e1b27fd75d10744fc8d6de152afaf646ced79a5113e Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.635695 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.793082 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76976dd897-x6b4b"] Jan 27 20:24:56 crc kubenswrapper[4793]: W0127 20:24:56.793492 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c367af8_d76d_4cbc_8fa4_729babd56421.slice/crio-340c28069a12214055208c774e1383bcca4d51d0c7d36907140fe80ff240ab8a WatchSource:0}: Error finding container 340c28069a12214055208c774e1383bcca4d51d0c7d36907140fe80ff240ab8a: Status 404 returned error can't find the container with id 340c28069a12214055208c774e1383bcca4d51d0c7d36907140fe80ff240ab8a Jan 27 20:24:56 crc kubenswrapper[4793]: I0127 20:24:56.818742 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-5mzxt"] Jan 27 20:24:56 crc kubenswrapper[4793]: W0127 20:24:56.830301 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf6e54982_02cd_4003_8bf4_e10dfade4061.slice/crio-ff84df6c0707eb02c337ac7198bfe1ab6c539436c604923f89223f4e9fa63d48 WatchSource:0}: Error finding container ff84df6c0707eb02c337ac7198bfe1ab6c539436c604923f89223f4e9fa63d48: Status 404 returned error can't find the container with id ff84df6c0707eb02c337ac7198bfe1ab6c539436c604923f89223f4e9fa63d48 Jan 27 20:24:57 crc kubenswrapper[4793]: I0127 20:24:57.028576 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="be1afc99-1852-4e3b-a2e7-e9beab138334" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused" 
Jan 27 20:24:57 crc kubenswrapper[4793]: I0127 20:24:57.358990 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ee28-account-create-update-rdbqh" event={"ID":"efa4e39e-3cd2-4c39-bc58-b257b5bf95a7","Type":"ContainerStarted","Data":"e69b45fbb1ba4b545adc21dce64abe8295dd66bf43f67384b51bc79a3ebc1d01"}
Jan 27 20:24:57 crc kubenswrapper[4793]: I0127 20:24:57.359358 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ee28-account-create-update-rdbqh" event={"ID":"efa4e39e-3cd2-4c39-bc58-b257b5bf95a7","Type":"ContainerStarted","Data":"06bd0a86c0c657b14d978e1b27fd75d10744fc8d6de152afaf646ced79a5113e"}
Jan 27 20:24:57 crc kubenswrapper[4793]: I0127 20:24:57.361972 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5mzxt" event={"ID":"f6e54982-02cd-4003-8bf4-e10dfade4061","Type":"ContainerStarted","Data":"752aff503dc687269961023c25f7b1d7d9241bfd3057ee6adef0215de8df6e29"}
Jan 27 20:24:57 crc kubenswrapper[4793]: I0127 20:24:57.362020 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5mzxt" event={"ID":"f6e54982-02cd-4003-8bf4-e10dfade4061","Type":"ContainerStarted","Data":"ff84df6c0707eb02c337ac7198bfe1ab6c539436c604923f89223f4e9fa63d48"}
Jan 27 20:24:57 crc kubenswrapper[4793]: I0127 20:24:57.367699 4793 generic.go:334] "Generic (PLEG): container finished" podID="2c367af8-d76d-4cbc-8fa4-729babd56421" containerID="218a0f7a5f4d724886237bfaa1b750528812e01fcead661dc7e635bd3a2aeb47" exitCode=0
Jan 27 20:24:57 crc kubenswrapper[4793]: I0127 20:24:57.367799 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76976dd897-x6b4b" event={"ID":"2c367af8-d76d-4cbc-8fa4-729babd56421","Type":"ContainerDied","Data":"218a0f7a5f4d724886237bfaa1b750528812e01fcead661dc7e635bd3a2aeb47"}
Jan 27 20:24:57 crc kubenswrapper[4793]: I0127 20:24:57.367838 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76976dd897-x6b4b" event={"ID":"2c367af8-d76d-4cbc-8fa4-729babd56421","Type":"ContainerStarted","Data":"340c28069a12214055208c774e1383bcca4d51d0c7d36907140fe80ff240ab8a"}
Jan 27 20:24:57 crc kubenswrapper[4793]: I0127 20:24:57.386255 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-ee28-account-create-update-rdbqh" podStartSLOduration=2.386229952 podStartE2EDuration="2.386229952s" podCreationTimestamp="2026-01-27 20:24:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:24:57.375563894 +0000 UTC m=+1322.765817050" watchObservedRunningTime="2026-01-27 20:24:57.386229952 +0000 UTC m=+1322.776483108"
Jan 27 20:24:57 crc kubenswrapper[4793]: I0127 20:24:57.428966 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-5mzxt" podStartSLOduration=2.428951028 podStartE2EDuration="2.428951028s" podCreationTimestamp="2026-01-27 20:24:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:24:57.425582923 +0000 UTC m=+1322.815836079" watchObservedRunningTime="2026-01-27 20:24:57.428951028 +0000 UTC m=+1322.819204174"
Jan 27 20:24:58 crc kubenswrapper[4793]: I0127 20:24:58.378270 4793 generic.go:334] "Generic (PLEG): container finished" podID="efa4e39e-3cd2-4c39-bc58-b257b5bf95a7" containerID="e69b45fbb1ba4b545adc21dce64abe8295dd66bf43f67384b51bc79a3ebc1d01" exitCode=0
Jan 27 20:24:58 crc kubenswrapper[4793]: I0127 20:24:58.378350 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ee28-account-create-update-rdbqh" event={"ID":"efa4e39e-3cd2-4c39-bc58-b257b5bf95a7","Type":"ContainerDied","Data":"e69b45fbb1ba4b545adc21dce64abe8295dd66bf43f67384b51bc79a3ebc1d01"} Jan 27 20:24:58 crc kubenswrapper[4793]: I0127 20:24:58.380223 4793 generic.go:334] "Generic (PLEG): container finished" podID="f6e54982-02cd-4003-8bf4-e10dfade4061" containerID="752aff503dc687269961023c25f7b1d7d9241bfd3057ee6adef0215de8df6e29" exitCode=0 Jan 27 20:24:58 crc kubenswrapper[4793]: I0127 20:24:58.380263 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5mzxt" event={"ID":"f6e54982-02cd-4003-8bf4-e10dfade4061","Type":"ContainerDied","Data":"752aff503dc687269961023c25f7b1d7d9241bfd3057ee6adef0215de8df6e29"} Jan 27 20:24:58 crc kubenswrapper[4793]: I0127 20:24:58.382350 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76976dd897-x6b4b" event={"ID":"2c367af8-d76d-4cbc-8fa4-729babd56421","Type":"ContainerStarted","Data":"3601776dffcf1c7db9e0cdc9e08315be1bce4aca1486b21b5ea839d8aee0fe1c"} Jan 27 20:24:58 crc kubenswrapper[4793]: I0127 20:24:58.382576 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:24:58 crc kubenswrapper[4793]: I0127 20:24:58.446284 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-76976dd897-x6b4b" podStartSLOduration=3.446243349 podStartE2EDuration="3.446243349s" podCreationTimestamp="2026-01-27 20:24:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:24:58.416106831 +0000 UTC m=+1323.806359987" watchObservedRunningTime="2026-01-27 20:24:58.446243349 +0000 UTC m=+1323.836496515" Jan 27 20:24:59 crc kubenswrapper[4793]: I0127 20:24:59.434454 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 27 20:24:59 crc kubenswrapper[4793]: I0127 20:24:59.769515 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-ee28-account-create-update-rdbqh" Jan 27 20:24:59 crc kubenswrapper[4793]: I0127 20:24:59.777210 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-5mzxt" Jan 27 20:24:59 crc kubenswrapper[4793]: I0127 20:24:59.809801 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6e54982-02cd-4003-8bf4-e10dfade4061-operator-scripts\") pod \"f6e54982-02cd-4003-8bf4-e10dfade4061\" (UID: \"f6e54982-02cd-4003-8bf4-e10dfade4061\") " Jan 27 20:24:59 crc kubenswrapper[4793]: I0127 20:24:59.810313 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/efa4e39e-3cd2-4c39-bc58-b257b5bf95a7-operator-scripts\") pod \"efa4e39e-3cd2-4c39-bc58-b257b5bf95a7\" (UID: \"efa4e39e-3cd2-4c39-bc58-b257b5bf95a7\") " Jan 27 20:24:59 crc kubenswrapper[4793]: I0127 20:24:59.810360 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6e54982-02cd-4003-8bf4-e10dfade4061-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f6e54982-02cd-4003-8bf4-e10dfade4061" (UID: "f6e54982-02cd-4003-8bf4-e10dfade4061"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:59 crc kubenswrapper[4793]: I0127 20:24:59.810421 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5q22\" (UniqueName: \"kubernetes.io/projected/f6e54982-02cd-4003-8bf4-e10dfade4061-kube-api-access-s5q22\") pod \"f6e54982-02cd-4003-8bf4-e10dfade4061\" (UID: \"f6e54982-02cd-4003-8bf4-e10dfade4061\") " Jan 27 20:24:59 crc kubenswrapper[4793]: I0127 20:24:59.810464 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-46gnj\" (UniqueName: \"kubernetes.io/projected/efa4e39e-3cd2-4c39-bc58-b257b5bf95a7-kube-api-access-46gnj\") pod \"efa4e39e-3cd2-4c39-bc58-b257b5bf95a7\" (UID: \"efa4e39e-3cd2-4c39-bc58-b257b5bf95a7\") " Jan 27 20:24:59 crc kubenswrapper[4793]: I0127 20:24:59.810892 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6e54982-02cd-4003-8bf4-e10dfade4061-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:59 crc kubenswrapper[4793]: I0127 20:24:59.821850 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6e54982-02cd-4003-8bf4-e10dfade4061-kube-api-access-s5q22" (OuterVolumeSpecName: "kube-api-access-s5q22") pod "f6e54982-02cd-4003-8bf4-e10dfade4061" (UID: "f6e54982-02cd-4003-8bf4-e10dfade4061"). InnerVolumeSpecName "kube-api-access-s5q22". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:24:59 crc kubenswrapper[4793]: I0127 20:24:59.880591 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/efa4e39e-3cd2-4c39-bc58-b257b5bf95a7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "efa4e39e-3cd2-4c39-bc58-b257b5bf95a7" (UID: "efa4e39e-3cd2-4c39-bc58-b257b5bf95a7"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:24:59 crc kubenswrapper[4793]: I0127 20:24:59.912402 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/efa4e39e-3cd2-4c39-bc58-b257b5bf95a7-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:59 crc kubenswrapper[4793]: I0127 20:24:59.912441 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s5q22\" (UniqueName: \"kubernetes.io/projected/f6e54982-02cd-4003-8bf4-e10dfade4061-kube-api-access-s5q22\") on node \"crc\" DevicePath \"\"" Jan 27 20:24:59 crc kubenswrapper[4793]: I0127 20:24:59.936261 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efa4e39e-3cd2-4c39-bc58-b257b5bf95a7-kube-api-access-46gnj" (OuterVolumeSpecName: "kube-api-access-46gnj") pod "efa4e39e-3cd2-4c39-bc58-b257b5bf95a7" (UID: "efa4e39e-3cd2-4c39-bc58-b257b5bf95a7"). InnerVolumeSpecName "kube-api-access-46gnj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:25:00 crc kubenswrapper[4793]: I0127 20:25:00.013586 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-46gnj\" (UniqueName: \"kubernetes.io/projected/efa4e39e-3cd2-4c39-bc58-b257b5bf95a7-kube-api-access-46gnj\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:00 crc kubenswrapper[4793]: I0127 20:25:00.402640 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5mzxt" Jan 27 20:25:00 crc kubenswrapper[4793]: I0127 20:25:00.402633 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5mzxt" event={"ID":"f6e54982-02cd-4003-8bf4-e10dfade4061","Type":"ContainerDied","Data":"ff84df6c0707eb02c337ac7198bfe1ab6c539436c604923f89223f4e9fa63d48"} Jan 27 20:25:00 crc kubenswrapper[4793]: I0127 20:25:00.403129 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ff84df6c0707eb02c337ac7198bfe1ab6c539436c604923f89223f4e9fa63d48" Jan 27 20:25:00 crc kubenswrapper[4793]: I0127 20:25:00.404239 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ee28-account-create-update-rdbqh" event={"ID":"efa4e39e-3cd2-4c39-bc58-b257b5bf95a7","Type":"ContainerDied","Data":"06bd0a86c0c657b14d978e1b27fd75d10744fc8d6de152afaf646ced79a5113e"} Jan 27 20:25:00 crc kubenswrapper[4793]: I0127 20:25:00.404387 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="06bd0a86c0c657b14d978e1b27fd75d10744fc8d6de152afaf646ced79a5113e" Jan 27 20:25:00 crc kubenswrapper[4793]: I0127 20:25:00.404287 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-ee28-account-create-update-rdbqh" Jan 27 20:25:04 crc kubenswrapper[4793]: I0127 20:25:04.579425 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 27 20:25:04 crc kubenswrapper[4793]: I0127 20:25:04.977829 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-58r7m"] Jan 27 20:25:04 crc kubenswrapper[4793]: E0127 20:25:04.980871 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efa4e39e-3cd2-4c39-bc58-b257b5bf95a7" containerName="mariadb-account-create-update" Jan 27 20:25:04 crc kubenswrapper[4793]: I0127 20:25:04.980918 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="efa4e39e-3cd2-4c39-bc58-b257b5bf95a7" containerName="mariadb-account-create-update" Jan 27 20:25:04 crc kubenswrapper[4793]: E0127 20:25:04.980971 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6e54982-02cd-4003-8bf4-e10dfade4061" containerName="mariadb-database-create" Jan 27 20:25:04 crc kubenswrapper[4793]: I0127 20:25:04.980980 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6e54982-02cd-4003-8bf4-e10dfade4061" containerName="mariadb-database-create" Jan 27 20:25:04 crc kubenswrapper[4793]: I0127 20:25:04.981580 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="efa4e39e-3cd2-4c39-bc58-b257b5bf95a7" containerName="mariadb-account-create-update" Jan 27 20:25:04 crc kubenswrapper[4793]: I0127 20:25:04.981646 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6e54982-02cd-4003-8bf4-e10dfade4061" containerName="mariadb-database-create" Jan 27 20:25:04 crc kubenswrapper[4793]: I0127 20:25:04.982901 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-58r7m" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.001515 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-58r7m"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.023504 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9abb695-6b0a-423e-bccc-a6910c0cafc5-operator-scripts\") pod \"barbican-db-create-58r7m\" (UID: \"e9abb695-6b0a-423e-bccc-a6910c0cafc5\") " pod="openstack/barbican-db-create-58r7m" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.023635 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgt7t\" (UniqueName: \"kubernetes.io/projected/e9abb695-6b0a-423e-bccc-a6910c0cafc5-kube-api-access-bgt7t\") pod \"barbican-db-create-58r7m\" (UID: \"e9abb695-6b0a-423e-bccc-a6910c0cafc5\") " pod="openstack/barbican-db-create-58r7m" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.071844 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-sqcpz"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.073354 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sqcpz" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.081098 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-423f-account-create-update-xxmkp"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.082371 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-423f-account-create-update-xxmkp" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.085685 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.099853 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-sqcpz"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.110276 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-423f-account-create-update-xxmkp"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.129786 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9abb695-6b0a-423e-bccc-a6910c0cafc5-operator-scripts\") pod \"barbican-db-create-58r7m\" (UID: \"e9abb695-6b0a-423e-bccc-a6910c0cafc5\") " pod="openstack/barbican-db-create-58r7m" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.129867 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggr5g\" (UniqueName: \"kubernetes.io/projected/49c6c821-fd44-417d-9352-8f0a3443c80c-kube-api-access-ggr5g\") pod \"barbican-423f-account-create-update-xxmkp\" (UID: \"49c6c821-fd44-417d-9352-8f0a3443c80c\") " pod="openstack/barbican-423f-account-create-update-xxmkp" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.129896 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49c6c821-fd44-417d-9352-8f0a3443c80c-operator-scripts\") pod \"barbican-423f-account-create-update-xxmkp\" (UID: \"49c6c821-fd44-417d-9352-8f0a3443c80c\") " pod="openstack/barbican-423f-account-create-update-xxmkp" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.129924 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94a9e18d-b12c-4edd-b7d5-0e976341ab95-operator-scripts\") pod \"cinder-db-create-sqcpz\" (UID: \"94a9e18d-b12c-4edd-b7d5-0e976341ab95\") " pod="openstack/cinder-db-create-sqcpz" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.129946 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgt7t\" (UniqueName: \"kubernetes.io/projected/e9abb695-6b0a-423e-bccc-a6910c0cafc5-kube-api-access-bgt7t\") pod \"barbican-db-create-58r7m\" (UID: \"e9abb695-6b0a-423e-bccc-a6910c0cafc5\") " pod="openstack/barbican-db-create-58r7m" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.129974 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcwdk\" (UniqueName: \"kubernetes.io/projected/94a9e18d-b12c-4edd-b7d5-0e976341ab95-kube-api-access-rcwdk\") pod \"cinder-db-create-sqcpz\" (UID: \"94a9e18d-b12c-4edd-b7d5-0e976341ab95\") " pod="openstack/cinder-db-create-sqcpz" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.130691 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9abb695-6b0a-423e-bccc-a6910c0cafc5-operator-scripts\") pod \"barbican-db-create-58r7m\" (UID: \"e9abb695-6b0a-423e-bccc-a6910c0cafc5\") " pod="openstack/barbican-db-create-58r7m" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.161763 4793 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-bgt7t\" (UniqueName: \"kubernetes.io/projected/e9abb695-6b0a-423e-bccc-a6910c0cafc5-kube-api-access-bgt7t\") pod \"barbican-db-create-58r7m\" (UID: \"e9abb695-6b0a-423e-bccc-a6910c0cafc5\") " pod="openstack/barbican-db-create-58r7m" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.231463 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggr5g\" (UniqueName: \"kubernetes.io/projected/49c6c821-fd44-417d-9352-8f0a3443c80c-kube-api-access-ggr5g\") pod \"barbican-423f-account-create-update-xxmkp\" (UID: \"49c6c821-fd44-417d-9352-8f0a3443c80c\") " pod="openstack/barbican-423f-account-create-update-xxmkp" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.231556 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49c6c821-fd44-417d-9352-8f0a3443c80c-operator-scripts\") pod \"barbican-423f-account-create-update-xxmkp\" (UID: \"49c6c821-fd44-417d-9352-8f0a3443c80c\") " pod="openstack/barbican-423f-account-create-update-xxmkp" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.231603 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94a9e18d-b12c-4edd-b7d5-0e976341ab95-operator-scripts\") pod \"cinder-db-create-sqcpz\" (UID: \"94a9e18d-b12c-4edd-b7d5-0e976341ab95\") " pod="openstack/cinder-db-create-sqcpz" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.231657 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcwdk\" (UniqueName: \"kubernetes.io/projected/94a9e18d-b12c-4edd-b7d5-0e976341ab95-kube-api-access-rcwdk\") pod \"cinder-db-create-sqcpz\" (UID: \"94a9e18d-b12c-4edd-b7d5-0e976341ab95\") " pod="openstack/cinder-db-create-sqcpz" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.232435 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49c6c821-fd44-417d-9352-8f0a3443c80c-operator-scripts\") pod \"barbican-423f-account-create-update-xxmkp\" (UID: \"49c6c821-fd44-417d-9352-8f0a3443c80c\") " pod="openstack/barbican-423f-account-create-update-xxmkp" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.232755 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94a9e18d-b12c-4edd-b7d5-0e976341ab95-operator-scripts\") pod \"cinder-db-create-sqcpz\" (UID: \"94a9e18d-b12c-4edd-b7d5-0e976341ab95\") " pod="openstack/cinder-db-create-sqcpz" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.249270 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-5058-account-create-update-jrldw"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.250470 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-5058-account-create-update-jrldw" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.252633 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.267269 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-5058-account-create-update-jrldw"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.273997 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggr5g\" (UniqueName: \"kubernetes.io/projected/49c6c821-fd44-417d-9352-8f0a3443c80c-kube-api-access-ggr5g\") pod \"barbican-423f-account-create-update-xxmkp\" (UID: \"49c6c821-fd44-417d-9352-8f0a3443c80c\") " pod="openstack/barbican-423f-account-create-update-xxmkp" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.286822 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcwdk\" (UniqueName: \"kubernetes.io/projected/94a9e18d-b12c-4edd-b7d5-0e976341ab95-kube-api-access-rcwdk\") pod \"cinder-db-create-sqcpz\" (UID: \"94a9e18d-b12c-4edd-b7d5-0e976341ab95\") " pod="openstack/cinder-db-create-sqcpz" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.328317 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-58r7m" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.333208 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wbls\" (UniqueName: \"kubernetes.io/projected/3093a815-0d7f-4490-96cb-87cb11a1eb4a-kube-api-access-6wbls\") pod \"cinder-5058-account-create-update-jrldw\" (UID: \"3093a815-0d7f-4490-96cb-87cb11a1eb4a\") " pod="openstack/cinder-5058-account-create-update-jrldw" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.333275 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3093a815-0d7f-4490-96cb-87cb11a1eb4a-operator-scripts\") pod \"cinder-5058-account-create-update-jrldw\" (UID: \"3093a815-0d7f-4490-96cb-87cb11a1eb4a\") " pod="openstack/cinder-5058-account-create-update-jrldw" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.394637 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sqcpz" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.397709 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-tsmxm"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.398927 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-tsmxm" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.404056 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.404329 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.404666 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.404860 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-5strr" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.437620 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-423f-account-create-update-xxmkp" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.438724 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3093a815-0d7f-4490-96cb-87cb11a1eb4a-operator-scripts\") pod \"cinder-5058-account-create-update-jrldw\" (UID: \"3093a815-0d7f-4490-96cb-87cb11a1eb4a\") " pod="openstack/cinder-5058-account-create-update-jrldw" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.438933 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wbls\" (UniqueName: \"kubernetes.io/projected/3093a815-0d7f-4490-96cb-87cb11a1eb4a-kube-api-access-6wbls\") pod \"cinder-5058-account-create-update-jrldw\" (UID: \"3093a815-0d7f-4490-96cb-87cb11a1eb4a\") " pod="openstack/cinder-5058-account-create-update-jrldw" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.439580 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3093a815-0d7f-4490-96cb-87cb11a1eb4a-operator-scripts\") pod \"cinder-5058-account-create-update-jrldw\" (UID: \"3093a815-0d7f-4490-96cb-87cb11a1eb4a\") " pod="openstack/cinder-5058-account-create-update-jrldw" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.461303 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-tsmxm"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.478126 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wbls\" (UniqueName: \"kubernetes.io/projected/3093a815-0d7f-4490-96cb-87cb11a1eb4a-kube-api-access-6wbls\") pod \"cinder-5058-account-create-update-jrldw\" (UID: \"3093a815-0d7f-4490-96cb-87cb11a1eb4a\") " pod="openstack/cinder-5058-account-create-update-jrldw" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.481011 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-sync-mvcq4"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.487707 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-mvcq4" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.495958 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-8fk8d" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.496362 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-config-data" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.510633 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-mvcq4"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.541311 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d79sn\" (UniqueName: \"kubernetes.io/projected/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-kube-api-access-d79sn\") pod \"watcher-db-sync-mvcq4\" (UID: \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\") " pod="openstack/watcher-db-sync-mvcq4" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.541382 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-combined-ca-bundle\") pod \"keystone-db-sync-tsmxm\" (UID: \"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5\") " pod="openstack/keystone-db-sync-tsmxm" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.541424 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-db-sync-config-data\") pod \"watcher-db-sync-mvcq4\" (UID: \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\") " pod="openstack/watcher-db-sync-mvcq4" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.541498 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-config-data\") pod \"keystone-db-sync-tsmxm\" (UID: \"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5\") " pod="openstack/keystone-db-sync-tsmxm" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.541528 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-combined-ca-bundle\") pod \"watcher-db-sync-mvcq4\" (UID: \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\") " pod="openstack/watcher-db-sync-mvcq4" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.541609 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-config-data\") pod \"watcher-db-sync-mvcq4\" (UID: \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\") " pod="openstack/watcher-db-sync-mvcq4" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.541657 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2wxm\" (UniqueName: \"kubernetes.io/projected/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-kube-api-access-q2wxm\") pod \"keystone-db-sync-tsmxm\" (UID: \"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5\") " pod="openstack/keystone-db-sync-tsmxm" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.550818 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-q7xrq"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 
20:25:05.553147 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-q7xrq" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.574697 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5058-account-create-update-jrldw" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.594181 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-q7xrq"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.646502 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-combined-ca-bundle\") pod \"watcher-db-sync-mvcq4\" (UID: \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\") " pod="openstack/watcher-db-sync-mvcq4" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.646784 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-config-data\") pod \"watcher-db-sync-mvcq4\" (UID: \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\") " pod="openstack/watcher-db-sync-mvcq4" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.646840 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6szjn\" (UniqueName: \"kubernetes.io/projected/7db08350-41e5-47fc-912f-d2a00aef5fc6-kube-api-access-6szjn\") pod \"neutron-db-create-q7xrq\" (UID: \"7db08350-41e5-47fc-912f-d2a00aef5fc6\") " pod="openstack/neutron-db-create-q7xrq" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.646879 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2wxm\" (UniqueName: \"kubernetes.io/projected/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-kube-api-access-q2wxm\") pod \"keystone-db-sync-tsmxm\" (UID: \"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5\") " pod="openstack/keystone-db-sync-tsmxm" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.646935 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d79sn\" (UniqueName: \"kubernetes.io/projected/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-kube-api-access-d79sn\") pod \"watcher-db-sync-mvcq4\" (UID: \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\") " pod="openstack/watcher-db-sync-mvcq4" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.646988 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-combined-ca-bundle\") pod \"keystone-db-sync-tsmxm\" (UID: \"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5\") " pod="openstack/keystone-db-sync-tsmxm" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.647021 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7db08350-41e5-47fc-912f-d2a00aef5fc6-operator-scripts\") pod \"neutron-db-create-q7xrq\" (UID: \"7db08350-41e5-47fc-912f-d2a00aef5fc6\") " pod="openstack/neutron-db-create-q7xrq" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.647070 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-db-sync-config-data\") pod \"watcher-db-sync-mvcq4\" (UID: \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\") " 
pod="openstack/watcher-db-sync-mvcq4" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.647163 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-config-data\") pod \"keystone-db-sync-tsmxm\" (UID: \"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5\") " pod="openstack/keystone-db-sync-tsmxm" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.652975 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-config-data\") pod \"watcher-db-sync-mvcq4\" (UID: \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\") " pod="openstack/watcher-db-sync-mvcq4" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.655282 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-combined-ca-bundle\") pod \"keystone-db-sync-tsmxm\" (UID: \"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5\") " pod="openstack/keystone-db-sync-tsmxm" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.659397 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-db-sync-config-data\") pod \"watcher-db-sync-mvcq4\" (UID: \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\") " pod="openstack/watcher-db-sync-mvcq4" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.660101 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-config-data\") pod \"keystone-db-sync-tsmxm\" (UID: \"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5\") " pod="openstack/keystone-db-sync-tsmxm" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.661661 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-combined-ca-bundle\") pod \"watcher-db-sync-mvcq4\" (UID: \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\") " pod="openstack/watcher-db-sync-mvcq4" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.665579 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d79sn\" (UniqueName: \"kubernetes.io/projected/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-kube-api-access-d79sn\") pod \"watcher-db-sync-mvcq4\" (UID: \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\") " pod="openstack/watcher-db-sync-mvcq4" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.673142 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2wxm\" (UniqueName: \"kubernetes.io/projected/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-kube-api-access-q2wxm\") pod \"keystone-db-sync-tsmxm\" (UID: \"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5\") " pod="openstack/keystone-db-sync-tsmxm" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.751297 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6szjn\" (UniqueName: \"kubernetes.io/projected/7db08350-41e5-47fc-912f-d2a00aef5fc6-kube-api-access-6szjn\") pod \"neutron-db-create-q7xrq\" (UID: \"7db08350-41e5-47fc-912f-d2a00aef5fc6\") " pod="openstack/neutron-db-create-q7xrq" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.751375 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7db08350-41e5-47fc-912f-d2a00aef5fc6-operator-scripts\") pod \"neutron-db-create-q7xrq\" (UID: \"7db08350-41e5-47fc-912f-d2a00aef5fc6\") " pod="openstack/neutron-db-create-q7xrq" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.752064 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7db08350-41e5-47fc-912f-d2a00aef5fc6-operator-scripts\") pod \"neutron-db-create-q7xrq\" (UID: \"7db08350-41e5-47fc-912f-d2a00aef5fc6\") " pod="openstack/neutron-db-create-q7xrq" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.758201 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-ac6d-account-create-update-fd9n2"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.758750 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-tsmxm" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.759402 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-ac6d-account-create-update-fd9n2" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.762749 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.800848 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-ac6d-account-create-update-fd9n2"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.817206 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6szjn\" (UniqueName: \"kubernetes.io/projected/7db08350-41e5-47fc-912f-d2a00aef5fc6-kube-api-access-6szjn\") pod \"neutron-db-create-q7xrq\" (UID: \"7db08350-41e5-47fc-912f-d2a00aef5fc6\") " pod="openstack/neutron-db-create-q7xrq" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.831031 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-mvcq4" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.865760 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d-operator-scripts\") pod \"neutron-ac6d-account-create-update-fd9n2\" (UID: \"074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d\") " pod="openstack/neutron-ac6d-account-create-update-fd9n2" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.867422 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-gds2t"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.870183 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-gds2t" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.870680 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmn2w\" (UniqueName: \"kubernetes.io/projected/074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d-kube-api-access-bmn2w\") pod \"neutron-ac6d-account-create-update-fd9n2\" (UID: \"074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d\") " pod="openstack/neutron-ac6d-account-create-update-fd9n2" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.886370 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.888833 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-q7xrq" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.889534 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-frbrb" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.916489 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-gds2t"] Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.975053 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtqcs\" (UniqueName: \"kubernetes.io/projected/03131418-ea5d-47bd-906c-8a93c2712b1c-kube-api-access-dtqcs\") pod \"glance-db-sync-gds2t\" (UID: \"03131418-ea5d-47bd-906c-8a93c2712b1c\") " pod="openstack/glance-db-sync-gds2t" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.975117 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-db-sync-config-data\") pod \"glance-db-sync-gds2t\" (UID: \"03131418-ea5d-47bd-906c-8a93c2712b1c\") " pod="openstack/glance-db-sync-gds2t" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.975199 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-config-data\") pod \"glance-db-sync-gds2t\" (UID: \"03131418-ea5d-47bd-906c-8a93c2712b1c\") " pod="openstack/glance-db-sync-gds2t" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.975286 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d-operator-scripts\") pod \"neutron-ac6d-account-create-update-fd9n2\" (UID: \"074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d\") " pod="openstack/neutron-ac6d-account-create-update-fd9n2" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.975329 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmn2w\" (UniqueName: \"kubernetes.io/projected/074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d-kube-api-access-bmn2w\") pod \"neutron-ac6d-account-create-update-fd9n2\" (UID: \"074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d\") " pod="openstack/neutron-ac6d-account-create-update-fd9n2" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.975369 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-combined-ca-bundle\") pod \"glance-db-sync-gds2t\" (UID: \"03131418-ea5d-47bd-906c-8a93c2712b1c\") " pod="openstack/glance-db-sync-gds2t" Jan 27 20:25:05 crc kubenswrapper[4793]: I0127 20:25:05.976208 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d-operator-scripts\") pod \"neutron-ac6d-account-create-update-fd9n2\" (UID: \"074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d\") " pod="openstack/neutron-ac6d-account-create-update-fd9n2" Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.011583 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmn2w\" (UniqueName: \"kubernetes.io/projected/074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d-kube-api-access-bmn2w\") pod \"neutron-ac6d-account-create-update-fd9n2\" (UID: 
\"074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d\") " pod="openstack/neutron-ac6d-account-create-update-fd9n2" Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.076701 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-combined-ca-bundle\") pod \"glance-db-sync-gds2t\" (UID: \"03131418-ea5d-47bd-906c-8a93c2712b1c\") " pod="openstack/glance-db-sync-gds2t" Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.076766 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtqcs\" (UniqueName: \"kubernetes.io/projected/03131418-ea5d-47bd-906c-8a93c2712b1c-kube-api-access-dtqcs\") pod \"glance-db-sync-gds2t\" (UID: \"03131418-ea5d-47bd-906c-8a93c2712b1c\") " pod="openstack/glance-db-sync-gds2t" Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.076796 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-db-sync-config-data\") pod \"glance-db-sync-gds2t\" (UID: \"03131418-ea5d-47bd-906c-8a93c2712b1c\") " pod="openstack/glance-db-sync-gds2t" Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.076850 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-config-data\") pod \"glance-db-sync-gds2t\" (UID: \"03131418-ea5d-47bd-906c-8a93c2712b1c\") " pod="openstack/glance-db-sync-gds2t" Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.080962 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-combined-ca-bundle\") pod \"glance-db-sync-gds2t\" (UID: \"03131418-ea5d-47bd-906c-8a93c2712b1c\") " pod="openstack/glance-db-sync-gds2t" Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.081206 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-config-data\") pod \"glance-db-sync-gds2t\" (UID: \"03131418-ea5d-47bd-906c-8a93c2712b1c\") " pod="openstack/glance-db-sync-gds2t" Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.081495 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-db-sync-config-data\") pod \"glance-db-sync-gds2t\" (UID: \"03131418-ea5d-47bd-906c-8a93c2712b1c\") " pod="openstack/glance-db-sync-gds2t" Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.085620 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-58r7m"] Jan 27 20:25:06 crc kubenswrapper[4793]: W0127 20:25:06.086879 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode9abb695_6b0a_423e_bccc_a6910c0cafc5.slice/crio-380daa7ecf95550b851e4a2f2fddd665772aa72bb727f96a050c60aef6cbf109 WatchSource:0}: Error finding container 380daa7ecf95550b851e4a2f2fddd665772aa72bb727f96a050c60aef6cbf109: Status 404 returned error can't find the container with id 380daa7ecf95550b851e4a2f2fddd665772aa72bb727f96a050c60aef6cbf109 Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.135130 4793 util.go:30] "No sandbox for pod can be found. 
Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.143031 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtqcs\" (UniqueName: \"kubernetes.io/projected/03131418-ea5d-47bd-906c-8a93c2712b1c-kube-api-access-dtqcs\") pod \"glance-db-sync-gds2t\" (UID: \"03131418-ea5d-47bd-906c-8a93c2712b1c\") " pod="openstack/glance-db-sync-gds2t"
Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.198787 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-76976dd897-x6b4b"
Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.247994 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-gds2t"
Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.316734 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74bdb45575-5dqh9"]
Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.316961 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" podUID="1959c2b9-5a70-4503-aef6-52dcfe28dd73" containerName="dnsmasq-dns" containerID="cri-o://73d1506cf1b6e71fec42fbf881ce3d0a406261e3ddddc2a09541ede841d45a25" gracePeriod=10
Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.359183 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-sqcpz"]
Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.546190 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-sqcpz" event={"ID":"94a9e18d-b12c-4edd-b7d5-0e976341ab95","Type":"ContainerStarted","Data":"7785cbc9551e1448e98827244d8ccc12f07e2b0ed8d05aff758f3c2e7c639d6b"}
Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.561809 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-58r7m" event={"ID":"e9abb695-6b0a-423e-bccc-a6910c0cafc5","Type":"ContainerStarted","Data":"380daa7ecf95550b851e4a2f2fddd665772aa72bb727f96a050c60aef6cbf109"}
Jan 27 20:25:06 crc kubenswrapper[4793]: I0127 20:25:06.590641 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-423f-account-create-update-xxmkp"]
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.037074 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.204818 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-mvcq4"]
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.247041 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-tsmxm"]
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.282076 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-5058-account-create-update-jrldw"]
Jan 27 20:25:07 crc kubenswrapper[4793]: W0127 20:25:07.291098 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3093a815_0d7f_4490_96cb_87cb11a1eb4a.slice/crio-ef2a01302995921138eb19d3a54ea655eb41e078b1a9e7c11a78e2ddf1480620 WatchSource:0}: Error finding container ef2a01302995921138eb19d3a54ea655eb41e078b1a9e7c11a78e2ddf1480620: Status 404 returned error can't find the container with id ef2a01302995921138eb19d3a54ea655eb41e078b1a9e7c11a78e2ddf1480620
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.368875 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-q7xrq"]
Jan 27 20:25:07 crc kubenswrapper[4793]: W0127 20:25:07.399561 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7db08350_41e5_47fc_912f_d2a00aef5fc6.slice/crio-d3de19e58077d26a5944bc8cfcb79cac950ff44508495032a875ff947f6d2929 WatchSource:0}: Error finding container d3de19e58077d26a5944bc8cfcb79cac950ff44508495032a875ff947f6d2929: Status 404 returned error can't find the container with id d3de19e58077d26a5944bc8cfcb79cac950ff44508495032a875ff947f6d2929
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.401907 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74bdb45575-5dqh9"
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.587807 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5058-account-create-update-jrldw" event={"ID":"3093a815-0d7f-4490-96cb-87cb11a1eb4a","Type":"ContainerStarted","Data":"2b480f404d36e6496570497fb774c548ea2f902fba91032176a35eaa1224d025"}
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.587851 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5058-account-create-update-jrldw" event={"ID":"3093a815-0d7f-4490-96cb-87cb11a1eb4a","Type":"ContainerStarted","Data":"ef2a01302995921138eb19d3a54ea655eb41e078b1a9e7c11a78e2ddf1480620"}
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.592514 4793 generic.go:334] "Generic (PLEG): container finished" podID="1959c2b9-5a70-4503-aef6-52dcfe28dd73" containerID="73d1506cf1b6e71fec42fbf881ce3d0a406261e3ddddc2a09541ede841d45a25" exitCode=0
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.592561 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" event={"ID":"1959c2b9-5a70-4503-aef6-52dcfe28dd73","Type":"ContainerDied","Data":"73d1506cf1b6e71fec42fbf881ce3d0a406261e3ddddc2a09541ede841d45a25"}
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.592596 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" event={"ID":"1959c2b9-5a70-4503-aef6-52dcfe28dd73","Type":"ContainerDied","Data":"4e9dc7e08565d00a7b175ef0da39b42b160a0f2e9222c402f66f5d753fb7f7f5"}
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.592619 4793 scope.go:117] "RemoveContainer" containerID="73d1506cf1b6e71fec42fbf881ce3d0a406261e3ddddc2a09541ede841d45a25"
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.592652 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74bdb45575-5dqh9"
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.596438 4793 generic.go:334] "Generic (PLEG): container finished" podID="e9abb695-6b0a-423e-bccc-a6910c0cafc5" containerID="b13ae1e4db9ffa87d15ad9cff548164273cd413600e684bb90e9e745d6b22725" exitCode=0
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.596506 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-58r7m" event={"ID":"e9abb695-6b0a-423e-bccc-a6910c0cafc5","Type":"ContainerDied","Data":"b13ae1e4db9ffa87d15ad9cff548164273cd413600e684bb90e9e745d6b22725"}
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.597912 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-q7xrq" event={"ID":"7db08350-41e5-47fc-912f-d2a00aef5fc6","Type":"ContainerStarted","Data":"d3de19e58077d26a5944bc8cfcb79cac950ff44508495032a875ff947f6d2929"}
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.598957 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tsmxm" event={"ID":"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5","Type":"ContainerStarted","Data":"ccc83154ed0b68c4ada320c7103b4aa19536e9ddf7ef81b0162e8350a78918b9"}
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.601328 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-423f-account-create-update-xxmkp" event={"ID":"49c6c821-fd44-417d-9352-8f0a3443c80c","Type":"ContainerStarted","Data":"37ddeb194d858320bbff390426131055888110731a421183cbcb3424e8dad9c4"}
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.601357 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-423f-account-create-update-xxmkp" event={"ID":"49c6c821-fd44-417d-9352-8f0a3443c80c","Type":"ContainerStarted","Data":"06249edcaa096344d03a67c72626f00953be8aa6b7fc40b69300fc33495be4f9"}
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.604038 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-mvcq4" event={"ID":"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941","Type":"ContainerStarted","Data":"259f6ae9c96e28fdb25e140f07ce8cdc790fccf074af55427c583ef983607a3b"}
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.605617 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-sqcpz" event={"ID":"94a9e18d-b12c-4edd-b7d5-0e976341ab95","Type":"ContainerStarted","Data":"95bf2cf70043fc83d341b65f0b8311c6a7cb175fdebc8e3c7e7a8c45e924a3c4"}
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.614465 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-dns-svc\") pod \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") "
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.615790 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-ovsdbserver-nb\") pod \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") "
Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.615824 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-config\") pod \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") "
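The dnsmasq-dns deletion above shows the shutdown path end to end: "SyncLoop DELETE" from the API, then kuberuntime_container "Killing container with a grace period" (gracePeriod=10), and roughly a second later the PLEG (kubelet's Pod Lifecycle Event Generator) relist reports "container finished" with exitCode=0 followed by ContainerDied events for both the container and its sandbox. A sketch that lifts the event={...} payloads out of these lines and pairs starts with deaths, again assuming a kubelet.log file; note that Data carries either a container ID or a sandbox ID, so both kinds pair up:

    import json
    import re

    EVENT = re.compile(r'pod="([^"]+)" event=(\{.*?\})')

    lifecycle = {}
    for line in open("kubelet.log"):
        if "SyncLoop (PLEG): event for pod" not in line:
            continue
        m = EVENT.search(line)
        if not m:
            continue
        # The payload is plain JSON: {"ID": pod UID, "Type": ..., "Data": container/sandbox ID}
        pod, ev = m.group(1), json.loads(m.group(2))
        if ev["Type"] == "ContainerStarted":
            lifecycle[ev["Data"]] = pod
        elif ev["Type"] == "ContainerDied" and ev["Data"] in lifecycle:
            print(f"{pod}: {ev['Data'][:12]} started and died within this log window")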
\"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.615895 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-ovsdbserver-sb\") pod \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.615950 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzg2l\" (UniqueName: \"kubernetes.io/projected/1959c2b9-5a70-4503-aef6-52dcfe28dd73-kube-api-access-bzg2l\") pod \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\" (UID: \"1959c2b9-5a70-4503-aef6-52dcfe28dd73\") " Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.626957 4793 scope.go:117] "RemoveContainer" containerID="4503e815e62847751d90f208bcd36819ca7b9c51d920ea8e5330e510a2e784cb" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.641145 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1959c2b9-5a70-4503-aef6-52dcfe28dd73-kube-api-access-bzg2l" (OuterVolumeSpecName: "kube-api-access-bzg2l") pod "1959c2b9-5a70-4503-aef6-52dcfe28dd73" (UID: "1959c2b9-5a70-4503-aef6-52dcfe28dd73"). InnerVolumeSpecName "kube-api-access-bzg2l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.670605 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-5058-account-create-update-jrldw" podStartSLOduration=2.651523675 podStartE2EDuration="2.651523675s" podCreationTimestamp="2026-01-27 20:25:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:25:07.648047227 +0000 UTC m=+1333.038300393" watchObservedRunningTime="2026-01-27 20:25:07.651523675 +0000 UTC m=+1333.041776831" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.698289 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-423f-account-create-update-xxmkp" podStartSLOduration=2.698270042 podStartE2EDuration="2.698270042s" podCreationTimestamp="2026-01-27 20:25:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:25:07.69701545 +0000 UTC m=+1333.087268616" watchObservedRunningTime="2026-01-27 20:25:07.698270042 +0000 UTC m=+1333.088523198" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.722586 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bzg2l\" (UniqueName: \"kubernetes.io/projected/1959c2b9-5a70-4503-aef6-52dcfe28dd73-kube-api-access-bzg2l\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.726738 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-ac6d-account-create-update-fd9n2"] Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.743100 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1959c2b9-5a70-4503-aef6-52dcfe28dd73" (UID: "1959c2b9-5a70-4503-aef6-52dcfe28dd73"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.751052 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-config" (OuterVolumeSpecName: "config") pod "1959c2b9-5a70-4503-aef6-52dcfe28dd73" (UID: "1959c2b9-5a70-4503-aef6-52dcfe28dd73"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:07 crc kubenswrapper[4793]: W0127 20:25:07.767915 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod074b8f4c_0ac5_4a1b_8d9e_e7491dd9758d.slice/crio-e9058385213d390584ead9b92a12bba890eb36cacae9574e977fb01544c0e1f7 WatchSource:0}: Error finding container e9058385213d390584ead9b92a12bba890eb36cacae9574e977fb01544c0e1f7: Status 404 returned error can't find the container with id e9058385213d390584ead9b92a12bba890eb36cacae9574e977fb01544c0e1f7 Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.831673 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1959c2b9-5a70-4503-aef6-52dcfe28dd73" (UID: "1959c2b9-5a70-4503-aef6-52dcfe28dd73"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.856277 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.856478 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.856491 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.863100 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-sqcpz" podStartSLOduration=2.863077773 podStartE2EDuration="2.863077773s" podCreationTimestamp="2026-01-27 20:25:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:25:07.759007372 +0000 UTC m=+1333.149260538" watchObservedRunningTime="2026-01-27 20:25:07.863077773 +0000 UTC m=+1333.253330929" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.883576 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1959c2b9-5a70-4503-aef6-52dcfe28dd73" (UID: "1959c2b9-5a70-4503-aef6-52dcfe28dd73"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.916007 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-gds2t"] Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.963686 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1959c2b9-5a70-4503-aef6-52dcfe28dd73-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.971078 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74bdb45575-5dqh9"] Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.980395 4793 scope.go:117] "RemoveContainer" containerID="73d1506cf1b6e71fec42fbf881ce3d0a406261e3ddddc2a09541ede841d45a25" Jan 27 20:25:07 crc kubenswrapper[4793]: E0127 20:25:07.980679 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73d1506cf1b6e71fec42fbf881ce3d0a406261e3ddddc2a09541ede841d45a25\": container with ID starting with 73d1506cf1b6e71fec42fbf881ce3d0a406261e3ddddc2a09541ede841d45a25 not found: ID does not exist" containerID="73d1506cf1b6e71fec42fbf881ce3d0a406261e3ddddc2a09541ede841d45a25" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.980710 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73d1506cf1b6e71fec42fbf881ce3d0a406261e3ddddc2a09541ede841d45a25"} err="failed to get container status \"73d1506cf1b6e71fec42fbf881ce3d0a406261e3ddddc2a09541ede841d45a25\": rpc error: code = NotFound desc = could not find container \"73d1506cf1b6e71fec42fbf881ce3d0a406261e3ddddc2a09541ede841d45a25\": container with ID starting with 73d1506cf1b6e71fec42fbf881ce3d0a406261e3ddddc2a09541ede841d45a25 not found: ID does not exist" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.980735 4793 scope.go:117] "RemoveContainer" containerID="4503e815e62847751d90f208bcd36819ca7b9c51d920ea8e5330e510a2e784cb" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.981140 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-74bdb45575-5dqh9"] Jan 27 20:25:07 crc kubenswrapper[4793]: E0127 20:25:07.992943 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4503e815e62847751d90f208bcd36819ca7b9c51d920ea8e5330e510a2e784cb\": container with ID starting with 4503e815e62847751d90f208bcd36819ca7b9c51d920ea8e5330e510a2e784cb not found: ID does not exist" containerID="4503e815e62847751d90f208bcd36819ca7b9c51d920ea8e5330e510a2e784cb" Jan 27 20:25:07 crc kubenswrapper[4793]: I0127 20:25:07.992979 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4503e815e62847751d90f208bcd36819ca7b9c51d920ea8e5330e510a2e784cb"} err="failed to get container status \"4503e815e62847751d90f208bcd36819ca7b9c51d920ea8e5330e510a2e784cb\": rpc error: code = NotFound desc = could not find container \"4503e815e62847751d90f208bcd36819ca7b9c51d920ea8e5330e510a2e784cb\": container with ID starting with 4503e815e62847751d90f208bcd36819ca7b9c51d920ea8e5330e510a2e784cb not found: ID does not exist" Jan 27 20:25:08 crc kubenswrapper[4793]: I0127 20:25:08.630282 4793 generic.go:334] "Generic (PLEG): container finished" podID="49c6c821-fd44-417d-9352-8f0a3443c80c" containerID="37ddeb194d858320bbff390426131055888110731a421183cbcb3424e8dad9c4" exitCode=0 Jan 27 20:25:08 crc 
Jan 27 20:25:08 crc kubenswrapper[4793]: I0127 20:25:08.633906 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-gds2t" event={"ID":"03131418-ea5d-47bd-906c-8a93c2712b1c","Type":"ContainerStarted","Data":"001d00b05aefcfd618f032911084bf2d9b99def427acc0f5f476bb2efafe651d"}
Jan 27 20:25:08 crc kubenswrapper[4793]: I0127 20:25:08.638275 4793 generic.go:334] "Generic (PLEG): container finished" podID="94a9e18d-b12c-4edd-b7d5-0e976341ab95" containerID="95bf2cf70043fc83d341b65f0b8311c6a7cb175fdebc8e3c7e7a8c45e924a3c4" exitCode=0
Jan 27 20:25:08 crc kubenswrapper[4793]: I0127 20:25:08.638359 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-sqcpz" event={"ID":"94a9e18d-b12c-4edd-b7d5-0e976341ab95","Type":"ContainerDied","Data":"95bf2cf70043fc83d341b65f0b8311c6a7cb175fdebc8e3c7e7a8c45e924a3c4"}
Jan 27 20:25:08 crc kubenswrapper[4793]: I0127 20:25:08.642896 4793 generic.go:334] "Generic (PLEG): container finished" podID="3093a815-0d7f-4490-96cb-87cb11a1eb4a" containerID="2b480f404d36e6496570497fb774c548ea2f902fba91032176a35eaa1224d025" exitCode=0
Jan 27 20:25:08 crc kubenswrapper[4793]: I0127 20:25:08.643090 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5058-account-create-update-jrldw" event={"ID":"3093a815-0d7f-4490-96cb-87cb11a1eb4a","Type":"ContainerDied","Data":"2b480f404d36e6496570497fb774c548ea2f902fba91032176a35eaa1224d025"}
Jan 27 20:25:08 crc kubenswrapper[4793]: I0127 20:25:08.645482 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ac6d-account-create-update-fd9n2" event={"ID":"074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d","Type":"ContainerStarted","Data":"e54c839c21db618b9435b0607c371ed564b02bbcb545995ebe577d81cb20d308"}
Jan 27 20:25:08 crc kubenswrapper[4793]: I0127 20:25:08.645521 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ac6d-account-create-update-fd9n2" event={"ID":"074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d","Type":"ContainerStarted","Data":"e9058385213d390584ead9b92a12bba890eb36cacae9574e977fb01544c0e1f7"}
Jan 27 20:25:08 crc kubenswrapper[4793]: I0127 20:25:08.656132 4793 generic.go:334] "Generic (PLEG): container finished" podID="7db08350-41e5-47fc-912f-d2a00aef5fc6" containerID="aba883d1a79edfa666fd45bf1e958b02f02ba7a54b880845ba4c2dfa9d2b9c96" exitCode=0
Jan 27 20:25:08 crc kubenswrapper[4793]: I0127 20:25:08.656335 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-q7xrq" event={"ID":"7db08350-41e5-47fc-912f-d2a00aef5fc6","Type":"ContainerDied","Data":"aba883d1a79edfa666fd45bf1e958b02f02ba7a54b880845ba4c2dfa9d2b9c96"}
Jan 27 20:25:08 crc kubenswrapper[4793]: I0127 20:25:08.674152 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-ac6d-account-create-update-fd9n2" podStartSLOduration=3.6741281199999998 podStartE2EDuration="3.67412812s" podCreationTimestamp="2026-01-27 20:25:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:25:08.666331954 +0000 UTC m=+1334.056585100" watchObservedRunningTime="2026-01-27 20:25:08.67412812 +0000 UTC m=+1334.064381276"
Jan 27 20:25:09 crc kubenswrapper[4793]: I0127 20:25:09.007864 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-58r7m"
Jan 27 20:25:09 crc kubenswrapper[4793]: I0127 20:25:09.091571 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgt7t\" (UniqueName: \"kubernetes.io/projected/e9abb695-6b0a-423e-bccc-a6910c0cafc5-kube-api-access-bgt7t\") pod \"e9abb695-6b0a-423e-bccc-a6910c0cafc5\" (UID: \"e9abb695-6b0a-423e-bccc-a6910c0cafc5\") "
Jan 27 20:25:09 crc kubenswrapper[4793]: I0127 20:25:09.091656 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9abb695-6b0a-423e-bccc-a6910c0cafc5-operator-scripts\") pod \"e9abb695-6b0a-423e-bccc-a6910c0cafc5\" (UID: \"e9abb695-6b0a-423e-bccc-a6910c0cafc5\") "
Jan 27 20:25:09 crc kubenswrapper[4793]: I0127 20:25:09.092864 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9abb695-6b0a-423e-bccc-a6910c0cafc5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e9abb695-6b0a-423e-bccc-a6910c0cafc5" (UID: "e9abb695-6b0a-423e-bccc-a6910c0cafc5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:25:09 crc kubenswrapper[4793]: I0127 20:25:09.142147 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9abb695-6b0a-423e-bccc-a6910c0cafc5-kube-api-access-bgt7t" (OuterVolumeSpecName: "kube-api-access-bgt7t") pod "e9abb695-6b0a-423e-bccc-a6910c0cafc5" (UID: "e9abb695-6b0a-423e-bccc-a6910c0cafc5"). InnerVolumeSpecName "kube-api-access-bgt7t". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:25:09 crc kubenswrapper[4793]: I0127 20:25:09.197829 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgt7t\" (UniqueName: \"kubernetes.io/projected/e9abb695-6b0a-423e-bccc-a6910c0cafc5-kube-api-access-bgt7t\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:09 crc kubenswrapper[4793]: I0127 20:25:09.197864 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9abb695-6b0a-423e-bccc-a6910c0cafc5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:09 crc kubenswrapper[4793]: I0127 20:25:09.435262 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 27 20:25:09 crc kubenswrapper[4793]: I0127 20:25:09.442759 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 27 20:25:09 crc kubenswrapper[4793]: I0127 20:25:09.669499 4793 generic.go:334] "Generic (PLEG): container finished" podID="074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d" containerID="e54c839c21db618b9435b0607c371ed564b02bbcb545995ebe577d81cb20d308" exitCode=0 Jan 27 20:25:09 crc kubenswrapper[4793]: I0127 20:25:09.669642 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ac6d-account-create-update-fd9n2" event={"ID":"074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d","Type":"ContainerDied","Data":"e54c839c21db618b9435b0607c371ed564b02bbcb545995ebe577d81cb20d308"} Jan 27 20:25:09 crc kubenswrapper[4793]: I0127 20:25:09.672075 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-58r7m" event={"ID":"e9abb695-6b0a-423e-bccc-a6910c0cafc5","Type":"ContainerDied","Data":"380daa7ecf95550b851e4a2f2fddd665772aa72bb727f96a050c60aef6cbf109"} Jan 27 20:25:09 crc kubenswrapper[4793]: I0127 20:25:09.672119 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="380daa7ecf95550b851e4a2f2fddd665772aa72bb727f96a050c60aef6cbf109" Jan 27 20:25:09 crc kubenswrapper[4793]: I0127 20:25:09.672250 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-58r7m" Jan 27 20:25:09 crc kubenswrapper[4793]: I0127 20:25:09.681978 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 27 20:25:09 crc kubenswrapper[4793]: I0127 20:25:09.832190 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1959c2b9-5a70-4503-aef6-52dcfe28dd73" path="/var/lib/kubelet/pods/1959c2b9-5a70-4503-aef6-52dcfe28dd73/volumes" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.214795 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-423f-account-create-update-xxmkp" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.325261 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49c6c821-fd44-417d-9352-8f0a3443c80c-operator-scripts\") pod \"49c6c821-fd44-417d-9352-8f0a3443c80c\" (UID: \"49c6c821-fd44-417d-9352-8f0a3443c80c\") " Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.326380 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c6c821-fd44-417d-9352-8f0a3443c80c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "49c6c821-fd44-417d-9352-8f0a3443c80c" (UID: "49c6c821-fd44-417d-9352-8f0a3443c80c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.326953 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ggr5g\" (UniqueName: \"kubernetes.io/projected/49c6c821-fd44-417d-9352-8f0a3443c80c-kube-api-access-ggr5g\") pod \"49c6c821-fd44-417d-9352-8f0a3443c80c\" (UID: \"49c6c821-fd44-417d-9352-8f0a3443c80c\") " Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.328497 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49c6c821-fd44-417d-9352-8f0a3443c80c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.342628 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c6c821-fd44-417d-9352-8f0a3443c80c-kube-api-access-ggr5g" (OuterVolumeSpecName: "kube-api-access-ggr5g") pod "49c6c821-fd44-417d-9352-8f0a3443c80c" (UID: "49c6c821-fd44-417d-9352-8f0a3443c80c"). InnerVolumeSpecName "kube-api-access-ggr5g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.413154 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sqcpz" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.421675 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-q7xrq" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.426500 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-5058-account-create-update-jrldw" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.437903 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ggr5g\" (UniqueName: \"kubernetes.io/projected/49c6c821-fd44-417d-9352-8f0a3443c80c-kube-api-access-ggr5g\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.539285 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7db08350-41e5-47fc-912f-d2a00aef5fc6-operator-scripts\") pod \"7db08350-41e5-47fc-912f-d2a00aef5fc6\" (UID: \"7db08350-41e5-47fc-912f-d2a00aef5fc6\") " Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.539372 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6wbls\" (UniqueName: \"kubernetes.io/projected/3093a815-0d7f-4490-96cb-87cb11a1eb4a-kube-api-access-6wbls\") pod \"3093a815-0d7f-4490-96cb-87cb11a1eb4a\" (UID: \"3093a815-0d7f-4490-96cb-87cb11a1eb4a\") " Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.539583 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6szjn\" (UniqueName: \"kubernetes.io/projected/7db08350-41e5-47fc-912f-d2a00aef5fc6-kube-api-access-6szjn\") pod \"7db08350-41e5-47fc-912f-d2a00aef5fc6\" (UID: \"7db08350-41e5-47fc-912f-d2a00aef5fc6\") " Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.539615 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94a9e18d-b12c-4edd-b7d5-0e976341ab95-operator-scripts\") pod \"94a9e18d-b12c-4edd-b7d5-0e976341ab95\" (UID: \"94a9e18d-b12c-4edd-b7d5-0e976341ab95\") " Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.539651 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3093a815-0d7f-4490-96cb-87cb11a1eb4a-operator-scripts\") pod \"3093a815-0d7f-4490-96cb-87cb11a1eb4a\" (UID: \"3093a815-0d7f-4490-96cb-87cb11a1eb4a\") " Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.539679 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rcwdk\" (UniqueName: \"kubernetes.io/projected/94a9e18d-b12c-4edd-b7d5-0e976341ab95-kube-api-access-rcwdk\") pod \"94a9e18d-b12c-4edd-b7d5-0e976341ab95\" (UID: \"94a9e18d-b12c-4edd-b7d5-0e976341ab95\") " Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.541255 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7db08350-41e5-47fc-912f-d2a00aef5fc6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7db08350-41e5-47fc-912f-d2a00aef5fc6" (UID: "7db08350-41e5-47fc-912f-d2a00aef5fc6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.541756 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3093a815-0d7f-4490-96cb-87cb11a1eb4a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3093a815-0d7f-4490-96cb-87cb11a1eb4a" (UID: "3093a815-0d7f-4490-96cb-87cb11a1eb4a"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.542367 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94a9e18d-b12c-4edd-b7d5-0e976341ab95-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "94a9e18d-b12c-4edd-b7d5-0e976341ab95" (UID: "94a9e18d-b12c-4edd-b7d5-0e976341ab95"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.544785 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3093a815-0d7f-4490-96cb-87cb11a1eb4a-kube-api-access-6wbls" (OuterVolumeSpecName: "kube-api-access-6wbls") pod "3093a815-0d7f-4490-96cb-87cb11a1eb4a" (UID: "3093a815-0d7f-4490-96cb-87cb11a1eb4a"). InnerVolumeSpecName "kube-api-access-6wbls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.546795 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94a9e18d-b12c-4edd-b7d5-0e976341ab95-kube-api-access-rcwdk" (OuterVolumeSpecName: "kube-api-access-rcwdk") pod "94a9e18d-b12c-4edd-b7d5-0e976341ab95" (UID: "94a9e18d-b12c-4edd-b7d5-0e976341ab95"). InnerVolumeSpecName "kube-api-access-rcwdk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.560017 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7db08350-41e5-47fc-912f-d2a00aef5fc6-kube-api-access-6szjn" (OuterVolumeSpecName: "kube-api-access-6szjn") pod "7db08350-41e5-47fc-912f-d2a00aef5fc6" (UID: "7db08350-41e5-47fc-912f-d2a00aef5fc6"). InnerVolumeSpecName "kube-api-access-6szjn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.641372 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6szjn\" (UniqueName: \"kubernetes.io/projected/7db08350-41e5-47fc-912f-d2a00aef5fc6-kube-api-access-6szjn\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.641414 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94a9e18d-b12c-4edd-b7d5-0e976341ab95-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.641425 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3093a815-0d7f-4490-96cb-87cb11a1eb4a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.641435 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rcwdk\" (UniqueName: \"kubernetes.io/projected/94a9e18d-b12c-4edd-b7d5-0e976341ab95-kube-api-access-rcwdk\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.641446 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7db08350-41e5-47fc-912f-d2a00aef5fc6-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.641456 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6wbls\" (UniqueName: \"kubernetes.io/projected/3093a815-0d7f-4490-96cb-87cb11a1eb4a-kube-api-access-6wbls\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.719904 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-sqcpz" event={"ID":"94a9e18d-b12c-4edd-b7d5-0e976341ab95","Type":"ContainerDied","Data":"7785cbc9551e1448e98827244d8ccc12f07e2b0ed8d05aff758f3c2e7c639d6b"} Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.719948 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7785cbc9551e1448e98827244d8ccc12f07e2b0ed8d05aff758f3c2e7c639d6b" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.720016 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sqcpz" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.730175 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5058-account-create-update-jrldw" event={"ID":"3093a815-0d7f-4490-96cb-87cb11a1eb4a","Type":"ContainerDied","Data":"ef2a01302995921138eb19d3a54ea655eb41e078b1a9e7c11a78e2ddf1480620"} Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.730227 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef2a01302995921138eb19d3a54ea655eb41e078b1a9e7c11a78e2ddf1480620" Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.730309 4793 util.go:48] "No ready sandbox for pod can be found. 
Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.732441 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-q7xrq" event={"ID":"7db08350-41e5-47fc-912f-d2a00aef5fc6","Type":"ContainerDied","Data":"d3de19e58077d26a5944bc8cfcb79cac950ff44508495032a875ff947f6d2929"}
Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.732485 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d3de19e58077d26a5944bc8cfcb79cac950ff44508495032a875ff947f6d2929"
Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.732579 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-q7xrq"
Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.749245 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-423f-account-create-update-xxmkp" event={"ID":"49c6c821-fd44-417d-9352-8f0a3443c80c","Type":"ContainerDied","Data":"06249edcaa096344d03a67c72626f00953be8aa6b7fc40b69300fc33495be4f9"}
Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.749295 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="06249edcaa096344d03a67c72626f00953be8aa6b7fc40b69300fc33495be4f9"
Jan 27 20:25:10 crc kubenswrapper[4793]: I0127 20:25:10.749378 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-423f-account-create-update-xxmkp"
Jan 27 20:25:12 crc kubenswrapper[4793]: I0127 20:25:12.121106 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-74bdb45575-5dqh9" podUID="1959c2b9-5a70-4503-aef6-52dcfe28dd73" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.123:5353: i/o timeout"
Jan 27 20:25:15 crc kubenswrapper[4793]: I0127 20:25:15.306754 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-ac6d-account-create-update-fd9n2"
Jan 27 20:25:15 crc kubenswrapper[4793]: I0127 20:25:15.384488 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d-operator-scripts\") pod \"074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d\" (UID: \"074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d\") "
Jan 27 20:25:15 crc kubenswrapper[4793]: I0127 20:25:15.384657 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bmn2w\" (UniqueName: \"kubernetes.io/projected/074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d-kube-api-access-bmn2w\") pod \"074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d\" (UID: \"074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d\") "
Jan 27 20:25:15 crc kubenswrapper[4793]: I0127 20:25:15.385151 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d" (UID: "074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:25:15 crc kubenswrapper[4793]: I0127 20:25:15.413784 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d-kube-api-access-bmn2w" (OuterVolumeSpecName: "kube-api-access-bmn2w") pod "074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d" (UID: "074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d"). InnerVolumeSpecName "kube-api-access-bmn2w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:25:15 crc kubenswrapper[4793]: I0127 20:25:15.487699 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 27 20:25:15 crc kubenswrapper[4793]: I0127 20:25:15.488141 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bmn2w\" (UniqueName: \"kubernetes.io/projected/074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d-kube-api-access-bmn2w\") on node \"crc\" DevicePath \"\""
Jan 27 20:25:15 crc kubenswrapper[4793]: I0127 20:25:15.822312 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ac6d-account-create-update-fd9n2" event={"ID":"074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d","Type":"ContainerDied","Data":"e9058385213d390584ead9b92a12bba890eb36cacae9574e977fb01544c0e1f7"}
Jan 27 20:25:15 crc kubenswrapper[4793]: I0127 20:25:15.822700 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e9058385213d390584ead9b92a12bba890eb36cacae9574e977fb01544c0e1f7"
Jan 27 20:25:15 crc kubenswrapper[4793]: I0127 20:25:15.822342 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-ac6d-account-create-update-fd9n2"
Jan 27 20:25:29 crc kubenswrapper[4793]: E0127 20:25:29.577281 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-glance-api:watcher_latest"
Jan 27 20:25:29 crc kubenswrapper[4793]: E0127 20:25:29.577824 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-glance-api:watcher_latest"
Jan 27 20:25:29 crc kubenswrapper[4793]: E0127 20:25:29.577996 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:38.102.83.195:5001/podified-master-centos10/openstack-glance-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dtqcs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-gds2t_openstack(03131418-ea5d-47bd-906c-8a93c2712b1c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 27 20:25:29 crc kubenswrapper[4793]: E0127 20:25:29.579493 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-gds2t" podUID="03131418-ea5d-47bd-906c-8a93c2712b1c"
Jan 27 20:25:30 crc kubenswrapper[4793]: E0127 20:25:30.006460 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.195:5001/podified-master-centos10/openstack-glance-api:watcher_latest\\\"\"" pod="openstack/glance-db-sync-gds2t" podUID="03131418-ea5d-47bd-906c-8a93c2712b1c"
Jan 27 20:25:30 crc kubenswrapper[4793]: E0127 20:25:30.451185 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-watcher-api:watcher_latest"
Jan 27 20:25:30 crc kubenswrapper[4793]: E0127 20:25:30.451235 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-watcher-api:watcher_latest"
Jan 27 20:25:30 crc kubenswrapper[4793]: E0127 20:25:30.451351 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:watcher-db-sync,Image:38.102.83.195:5001/podified-master-centos10/openstack-watcher-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/watcher/watcher.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:watcher-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-d79sn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-db-sync-mvcq4_openstack(7a01af79-2bf8-40d8-aa3b-5ea2df7b6941): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 27 20:25:30 crc kubenswrapper[4793]: E0127 20:25:30.453486 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/watcher-db-sync-mvcq4" podUID="7a01af79-2bf8-40d8-aa3b-5ea2df7b6941"
Jan 27 20:25:31 crc kubenswrapper[4793]: I0127 20:25:31.014976 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tsmxm" event={"ID":"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5","Type":"ContainerStarted","Data":"630ba15635812f4b701dae61df9462382e9d806e856d29da60cdaa91f6c68caa"}
Jan 27 20:25:31 crc kubenswrapper[4793]: E0127 20:25:31.017963 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.195:5001/podified-master-centos10/openstack-watcher-api:watcher_latest\\\"\"" pod="openstack/watcher-db-sync-mvcq4" podUID="7a01af79-2bf8-40d8-aa3b-5ea2df7b6941"
pod="openstack/keystone-db-sync-tsmxm" podStartSLOduration=2.918227123 podStartE2EDuration="26.079039332s" podCreationTimestamp="2026-01-27 20:25:05 +0000 UTC" firstStartedPulling="2026-01-27 20:25:07.260713332 +0000 UTC m=+1332.650966488" lastFinishedPulling="2026-01-27 20:25:30.421525541 +0000 UTC m=+1355.811778697" observedRunningTime="2026-01-27 20:25:31.074257372 +0000 UTC m=+1356.464510528" watchObservedRunningTime="2026-01-27 20:25:31.079039332 +0000 UTC m=+1356.469292488" Jan 27 20:25:36 crc kubenswrapper[4793]: I0127 20:25:36.063431 4793 generic.go:334] "Generic (PLEG): container finished" podID="b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5" containerID="630ba15635812f4b701dae61df9462382e9d806e856d29da60cdaa91f6c68caa" exitCode=0 Jan 27 20:25:36 crc kubenswrapper[4793]: I0127 20:25:36.063558 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tsmxm" event={"ID":"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5","Type":"ContainerDied","Data":"630ba15635812f4b701dae61df9462382e9d806e856d29da60cdaa91f6c68caa"} Jan 27 20:25:37 crc kubenswrapper[4793]: I0127 20:25:37.479059 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-tsmxm" Jan 27 20:25:37 crc kubenswrapper[4793]: I0127 20:25:37.595670 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2wxm\" (UniqueName: \"kubernetes.io/projected/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-kube-api-access-q2wxm\") pod \"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5\" (UID: \"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5\") " Jan 27 20:25:37 crc kubenswrapper[4793]: I0127 20:25:37.595960 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-combined-ca-bundle\") pod \"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5\" (UID: \"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5\") " Jan 27 20:25:37 crc kubenswrapper[4793]: I0127 20:25:37.595989 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-config-data\") pod \"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5\" (UID: \"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5\") " Jan 27 20:25:37 crc kubenswrapper[4793]: I0127 20:25:37.602867 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-kube-api-access-q2wxm" (OuterVolumeSpecName: "kube-api-access-q2wxm") pod "b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5" (UID: "b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5"). InnerVolumeSpecName "kube-api-access-q2wxm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:25:37 crc kubenswrapper[4793]: I0127 20:25:37.623322 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5" (UID: "b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:25:37 crc kubenswrapper[4793]: I0127 20:25:37.657195 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-config-data" (OuterVolumeSpecName: "config-data") pod "b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5" (UID: "b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:25:37 crc kubenswrapper[4793]: I0127 20:25:37.704758 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:37 crc kubenswrapper[4793]: I0127 20:25:37.704790 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:37 crc kubenswrapper[4793]: I0127 20:25:37.704801 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2wxm\" (UniqueName: \"kubernetes.io/projected/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5-kube-api-access-q2wxm\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.140838 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tsmxm" event={"ID":"b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5","Type":"ContainerDied","Data":"ccc83154ed0b68c4ada320c7103b4aa19536e9ddf7ef81b0162e8350a78918b9"} Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.140950 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ccc83154ed0b68c4ada320c7103b4aa19536e9ddf7ef81b0162e8350a78918b9" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.140955 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-tsmxm" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.399627 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-6t92c"] Jan 27 20:25:38 crc kubenswrapper[4793]: E0127 20:25:38.400009 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49c6c821-fd44-417d-9352-8f0a3443c80c" containerName="mariadb-account-create-update" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400026 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="49c6c821-fd44-417d-9352-8f0a3443c80c" containerName="mariadb-account-create-update" Jan 27 20:25:38 crc kubenswrapper[4793]: E0127 20:25:38.400042 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3093a815-0d7f-4490-96cb-87cb11a1eb4a" containerName="mariadb-account-create-update" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400050 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="3093a815-0d7f-4490-96cb-87cb11a1eb4a" containerName="mariadb-account-create-update" Jan 27 20:25:38 crc kubenswrapper[4793]: E0127 20:25:38.400058 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1959c2b9-5a70-4503-aef6-52dcfe28dd73" containerName="dnsmasq-dns" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400065 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1959c2b9-5a70-4503-aef6-52dcfe28dd73" containerName="dnsmasq-dns" Jan 27 20:25:38 crc kubenswrapper[4793]: E0127 20:25:38.400080 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d" containerName="mariadb-account-create-update" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400086 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d" containerName="mariadb-account-create-update" Jan 27 20:25:38 crc kubenswrapper[4793]: E0127 20:25:38.400094 4793 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="1959c2b9-5a70-4503-aef6-52dcfe28dd73" containerName="init" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400100 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1959c2b9-5a70-4503-aef6-52dcfe28dd73" containerName="init" Jan 27 20:25:38 crc kubenswrapper[4793]: E0127 20:25:38.400111 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7db08350-41e5-47fc-912f-d2a00aef5fc6" containerName="mariadb-database-create" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400117 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="7db08350-41e5-47fc-912f-d2a00aef5fc6" containerName="mariadb-database-create" Jan 27 20:25:38 crc kubenswrapper[4793]: E0127 20:25:38.400129 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9abb695-6b0a-423e-bccc-a6910c0cafc5" containerName="mariadb-database-create" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400135 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9abb695-6b0a-423e-bccc-a6910c0cafc5" containerName="mariadb-database-create" Jan 27 20:25:38 crc kubenswrapper[4793]: E0127 20:25:38.400153 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94a9e18d-b12c-4edd-b7d5-0e976341ab95" containerName="mariadb-database-create" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400158 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="94a9e18d-b12c-4edd-b7d5-0e976341ab95" containerName="mariadb-database-create" Jan 27 20:25:38 crc kubenswrapper[4793]: E0127 20:25:38.400168 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5" containerName="keystone-db-sync" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400174 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5" containerName="keystone-db-sync" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400320 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="1959c2b9-5a70-4503-aef6-52dcfe28dd73" containerName="dnsmasq-dns" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400346 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="7db08350-41e5-47fc-912f-d2a00aef5fc6" containerName="mariadb-database-create" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400353 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="94a9e18d-b12c-4edd-b7d5-0e976341ab95" containerName="mariadb-database-create" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400363 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5" containerName="keystone-db-sync" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400371 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d" containerName="mariadb-account-create-update" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400390 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="49c6c821-fd44-417d-9352-8f0a3443c80c" containerName="mariadb-account-create-update" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400400 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="3093a815-0d7f-4490-96cb-87cb11a1eb4a" containerName="mariadb-account-create-update" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400416 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9abb695-6b0a-423e-bccc-a6910c0cafc5" 
containerName="mariadb-database-create" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.400970 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-6t92c" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.405533 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.405670 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.405717 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.405545 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-5strr" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.406178 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.442847 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fb9b786bc-jrbtx"] Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.444946 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.478647 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fb9b786bc-jrbtx"] Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.519818 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-6t92c"] Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.538764 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-dns-swift-storage-0\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.538817 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-config-data\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.538851 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbb49\" (UniqueName: \"kubernetes.io/projected/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-kube-api-access-vbb49\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.538871 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-ovsdbserver-sb\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.538897 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-credential-keys\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.538921 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-ovsdbserver-nb\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.538984 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-config\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.539072 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-scripts\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.539101 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xktxv\" (UniqueName: \"kubernetes.io/projected/659dcf7f-fe0d-449e-99e0-f583f1df75b9-kube-api-access-xktxv\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.539132 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-combined-ca-bundle\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.539191 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-dns-svc\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.539224 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-fernet-keys\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.602518 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-799bb4b979-hxnqj"] Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.604292 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.617343 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-c4xkk" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.617599 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.617640 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-799bb4b979-hxnqj"] Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.617764 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.617918 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.640598 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-scripts\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.640650 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xktxv\" (UniqueName: \"kubernetes.io/projected/659dcf7f-fe0d-449e-99e0-f583f1df75b9-kube-api-access-xktxv\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.640682 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-combined-ca-bundle\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.640743 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-dns-svc\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.640765 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-fernet-keys\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.640819 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-dns-swift-storage-0\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.640841 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-config-data\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c" Jan 
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.640869 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbb49\" (UniqueName: \"kubernetes.io/projected/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-kube-api-access-vbb49\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.640890 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-ovsdbserver-sb\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.640921 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-credential-keys\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.640945 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-ovsdbserver-nb\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.640990 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-config\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.642126 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-config\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.642827 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-dns-swift-storage-0\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.643515 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-dns-svc\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.643982 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-ovsdbserver-sb\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.644557 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-ovsdbserver-nb\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.656671 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-config-data\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.657700 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-credential-keys\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.663414 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-fernet-keys\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.663467 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-combined-ca-bundle\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.672260 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-scripts\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.725828 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xktxv\" (UniqueName: \"kubernetes.io/projected/659dcf7f-fe0d-449e-99e0-f583f1df75b9-kube-api-access-xktxv\") pod \"dnsmasq-dns-fb9b786bc-jrbtx\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.726474 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbb49\" (UniqueName: \"kubernetes.io/projected/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-kube-api-access-vbb49\") pod \"keystone-bootstrap-6t92c\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " pod="openstack/keystone-bootstrap-6t92c"
Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.745814 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c74d74d5-edae-4bb5-a9be-3fc732d3de84-scripts\") pod \"horizon-799bb4b979-hxnqj\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " pod="openstack/horizon-799bb4b979-hxnqj"
\"horizon-799bb4b979-hxnqj\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.745943 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c74d74d5-edae-4bb5-a9be-3fc732d3de84-horizon-secret-key\") pod \"horizon-799bb4b979-hxnqj\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.745972 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c74d74d5-edae-4bb5-a9be-3fc732d3de84-logs\") pod \"horizon-799bb4b979-hxnqj\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.746032 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c74d74d5-edae-4bb5-a9be-3fc732d3de84-config-data\") pod \"horizon-799bb4b979-hxnqj\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.776007 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.811156 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-cthwx"] Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.821173 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-cthwx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.866302 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-combined-ca-bundle\") pod \"neutron-db-sync-cthwx\" (UID: \"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd\") " pod="openstack/neutron-db-sync-cthwx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.866373 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c74d74d5-edae-4bb5-a9be-3fc732d3de84-horizon-secret-key\") pod \"horizon-799bb4b979-hxnqj\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.866412 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c74d74d5-edae-4bb5-a9be-3fc732d3de84-logs\") pod \"horizon-799bb4b979-hxnqj\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.866481 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-config\") pod \"neutron-db-sync-cthwx\" (UID: \"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd\") " pod="openstack/neutron-db-sync-cthwx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.866539 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/c74d74d5-edae-4bb5-a9be-3fc732d3de84-config-data\") pod \"horizon-799bb4b979-hxnqj\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.866773 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqx5g\" (UniqueName: \"kubernetes.io/projected/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-kube-api-access-bqx5g\") pod \"neutron-db-sync-cthwx\" (UID: \"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd\") " pod="openstack/neutron-db-sync-cthwx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.866809 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c74d74d5-edae-4bb5-a9be-3fc732d3de84-scripts\") pod \"horizon-799bb4b979-hxnqj\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.866918 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qnt7\" (UniqueName: \"kubernetes.io/projected/c74d74d5-edae-4bb5-a9be-3fc732d3de84-kube-api-access-2qnt7\") pod \"horizon-799bb4b979-hxnqj\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.869823 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-cthwx"] Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.870181 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c74d74d5-edae-4bb5-a9be-3fc732d3de84-logs\") pod \"horizon-799bb4b979-hxnqj\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.872225 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c74d74d5-edae-4bb5-a9be-3fc732d3de84-scripts\") pod \"horizon-799bb4b979-hxnqj\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.873073 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c74d74d5-edae-4bb5-a9be-3fc732d3de84-config-data\") pod \"horizon-799bb4b979-hxnqj\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.873143 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.881377 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c74d74d5-edae-4bb5-a9be-3fc732d3de84-horizon-secret-key\") pod \"horizon-799bb4b979-hxnqj\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.891868 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-wfnzb" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.893223 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.932628 4793 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-cql8q"] Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.935972 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.940225 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-4mmj9" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.940529 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.941375 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qnt7\" (UniqueName: \"kubernetes.io/projected/c74d74d5-edae-4bb5-a9be-3fc732d3de84-kube-api-access-2qnt7\") pod \"horizon-799bb4b979-hxnqj\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.950443 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.950781 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.959609 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-cql8q"] Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.968591 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-combined-ca-bundle\") pod \"neutron-db-sync-cthwx\" (UID: \"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd\") " pod="openstack/neutron-db-sync-cthwx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.969086 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-config\") pod \"neutron-db-sync-cthwx\" (UID: \"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd\") " pod="openstack/neutron-db-sync-cthwx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.969387 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqx5g\" (UniqueName: \"kubernetes.io/projected/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-kube-api-access-bqx5g\") pod \"neutron-db-sync-cthwx\" (UID: \"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd\") " pod="openstack/neutron-db-sync-cthwx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.981572 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-config\") pod \"neutron-db-sync-cthwx\" (UID: \"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd\") " pod="openstack/neutron-db-sync-cthwx" Jan 27 20:25:38 crc kubenswrapper[4793]: I0127 20:25:38.992598 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-combined-ca-bundle\") pod \"neutron-db-sync-cthwx\" (UID: \"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd\") " pod="openstack/neutron-db-sync-cthwx" Jan 27 20:25:39 crc kubenswrapper[4793]: I0127 20:25:39.026324 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-6t92c" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.088361 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a07ca8f7-3387-4f58-a094-26d491028752-etc-machine-id\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.088449 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvc7f\" (UniqueName: \"kubernetes.io/projected/a07ca8f7-3387-4f58-a094-26d491028752-kube-api-access-pvc7f\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.088507 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-scripts\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.088575 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-db-sync-config-data\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.088600 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-config-data\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.088667 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-combined-ca-bundle\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.125462 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-wlfpp"] Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.127889 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-wlfpp" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.135300 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.146461 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqx5g\" (UniqueName: \"kubernetes.io/projected/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-kube-api-access-bqx5g\") pod \"neutron-db-sync-cthwx\" (UID: \"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd\") " pod="openstack/neutron-db-sync-cthwx" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.163935 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-lwxzs" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.190186 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a07ca8f7-3387-4f58-a094-26d491028752-etc-machine-id\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.190234 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvc7f\" (UniqueName: \"kubernetes.io/projected/a07ca8f7-3387-4f58-a094-26d491028752-kube-api-access-pvc7f\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.190264 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-scripts\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.190609 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/64b480ab-f615-4d0b-9b56-ef6d0acf8955-db-sync-config-data\") pod \"barbican-db-sync-wlfpp\" (UID: \"64b480ab-f615-4d0b-9b56-ef6d0acf8955\") " pod="openstack/barbican-db-sync-wlfpp" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.191845 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a07ca8f7-3387-4f58-a094-26d491028752-etc-machine-id\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.191882 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-db-sync-config-data\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.191923 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-config-data\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.191984 4793 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgtxm\" (UniqueName: \"kubernetes.io/projected/64b480ab-f615-4d0b-9b56-ef6d0acf8955-kube-api-access-xgtxm\") pod \"barbican-db-sync-wlfpp\" (UID: \"64b480ab-f615-4d0b-9b56-ef6d0acf8955\") " pod="openstack/barbican-db-sync-wlfpp" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.192079 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-combined-ca-bundle\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.192124 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b480ab-f615-4d0b-9b56-ef6d0acf8955-combined-ca-bundle\") pod \"barbican-db-sync-wlfpp\" (UID: \"64b480ab-f615-4d0b-9b56-ef6d0acf8955\") " pod="openstack/barbican-db-sync-wlfpp" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.202805 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-scripts\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.207049 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-db-sync-config-data\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.208268 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-combined-ca-bundle\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.210934 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-wlfpp"] Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.227138 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvc7f\" (UniqueName: \"kubernetes.io/projected/a07ca8f7-3387-4f58-a094-26d491028752-kube-api-access-pvc7f\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.228328 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-config-data\") pod \"cinder-db-sync-cql8q\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.232823 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7468b88d47-6g4mq"] Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.234539 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.279623 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.284801 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-cthwx" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.292981 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/28e1e383-f9b8-4d10-9503-71fed48f7777-config-data\") pod \"horizon-7468b88d47-6g4mq\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.293050 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/28e1e383-f9b8-4d10-9503-71fed48f7777-scripts\") pod \"horizon-7468b88d47-6g4mq\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.293198 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/64b480ab-f615-4d0b-9b56-ef6d0acf8955-db-sync-config-data\") pod \"barbican-db-sync-wlfpp\" (UID: \"64b480ab-f615-4d0b-9b56-ef6d0acf8955\") " pod="openstack/barbican-db-sync-wlfpp" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.293251 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g28mx\" (UniqueName: \"kubernetes.io/projected/28e1e383-f9b8-4d10-9503-71fed48f7777-kube-api-access-g28mx\") pod \"horizon-7468b88d47-6g4mq\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.293414 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgtxm\" (UniqueName: \"kubernetes.io/projected/64b480ab-f615-4d0b-9b56-ef6d0acf8955-kube-api-access-xgtxm\") pod \"barbican-db-sync-wlfpp\" (UID: \"64b480ab-f615-4d0b-9b56-ef6d0acf8955\") " pod="openstack/barbican-db-sync-wlfpp" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.293448 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/28e1e383-f9b8-4d10-9503-71fed48f7777-horizon-secret-key\") pod \"horizon-7468b88d47-6g4mq\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.293544 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b480ab-f615-4d0b-9b56-ef6d0acf8955-combined-ca-bundle\") pod \"barbican-db-sync-wlfpp\" (UID: \"64b480ab-f615-4d0b-9b56-ef6d0acf8955\") " pod="openstack/barbican-db-sync-wlfpp" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.293591 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28e1e383-f9b8-4d10-9503-71fed48f7777-logs\") pod \"horizon-7468b88d47-6g4mq\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc 
kubenswrapper[4793]: I0127 20:25:39.294970 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.300708 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.305816 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.305984 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/64b480ab-f615-4d0b-9b56-ef6d0acf8955-db-sync-config-data\") pod \"barbican-db-sync-wlfpp\" (UID: \"64b480ab-f615-4d0b-9b56-ef6d0acf8955\") " pod="openstack/barbican-db-sync-wlfpp" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.309429 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b480ab-f615-4d0b-9b56-ef6d0acf8955-combined-ca-bundle\") pod \"barbican-db-sync-wlfpp\" (UID: \"64b480ab-f615-4d0b-9b56-ef6d0acf8955\") " pod="openstack/barbican-db-sync-wlfpp" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.336304 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7468b88d47-6g4mq"] Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.347973 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-cql8q" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.363013 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgtxm\" (UniqueName: \"kubernetes.io/projected/64b480ab-f615-4d0b-9b56-ef6d0acf8955-kube-api-access-xgtxm\") pod \"barbican-db-sync-wlfpp\" (UID: \"64b480ab-f615-4d0b-9b56-ef6d0acf8955\") " pod="openstack/barbican-db-sync-wlfpp" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.364031 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.397404 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g28mx\" (UniqueName: \"kubernetes.io/projected/28e1e383-f9b8-4d10-9503-71fed48f7777-kube-api-access-g28mx\") pod \"horizon-7468b88d47-6g4mq\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.397472 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.397510 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wskfl\" (UniqueName: \"kubernetes.io/projected/aec1692e-4b57-4c95-863f-589e8f36e4a1-kube-api-access-wskfl\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.409314 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/28e1e383-f9b8-4d10-9503-71fed48f7777-horizon-secret-key\") pod \"horizon-7468b88d47-6g4mq\" 
(UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.409451 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-config-data\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.409537 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28e1e383-f9b8-4d10-9503-71fed48f7777-logs\") pod \"horizon-7468b88d47-6g4mq\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.409592 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-scripts\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.409721 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/28e1e383-f9b8-4d10-9503-71fed48f7777-config-data\") pod \"horizon-7468b88d47-6g4mq\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.409766 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.409837 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aec1692e-4b57-4c95-863f-589e8f36e4a1-run-httpd\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.409907 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/28e1e383-f9b8-4d10-9503-71fed48f7777-scripts\") pod \"horizon-7468b88d47-6g4mq\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.409948 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aec1692e-4b57-4c95-863f-589e8f36e4a1-log-httpd\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.416348 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28e1e383-f9b8-4d10-9503-71fed48f7777-logs\") pod \"horizon-7468b88d47-6g4mq\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.417457 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/28e1e383-f9b8-4d10-9503-71fed48f7777-config-data\") pod \"horizon-7468b88d47-6g4mq\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.420080 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/28e1e383-f9b8-4d10-9503-71fed48f7777-scripts\") pod \"horizon-7468b88d47-6g4mq\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.424027 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/28e1e383-f9b8-4d10-9503-71fed48f7777-horizon-secret-key\") pod \"horizon-7468b88d47-6g4mq\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.425328 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-ldntm"] Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.427462 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.451695 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.452016 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-w5n2h" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.623190 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-combined-ca-bundle\") pod \"placement-db-sync-ldntm\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.623258 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-scripts\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.623309 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.623330 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvdfp\" (UniqueName: \"kubernetes.io/projected/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-kube-api-access-gvdfp\") pod \"placement-db-sync-ldntm\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.623350 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aec1692e-4b57-4c95-863f-589e8f36e4a1-run-httpd\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.623380 4793 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-logs\") pod \"placement-db-sync-ldntm\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.623400 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aec1692e-4b57-4c95-863f-589e8f36e4a1-log-httpd\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.623447 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.623475 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wskfl\" (UniqueName: \"kubernetes.io/projected/aec1692e-4b57-4c95-863f-589e8f36e4a1-kube-api-access-wskfl\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.623503 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-scripts\") pod \"placement-db-sync-ldntm\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.623534 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-config-data\") pod \"placement-db-sync-ldntm\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.623624 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-config-data\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.625382 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aec1692e-4b57-4c95-863f-589e8f36e4a1-log-httpd\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.630844 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aec1692e-4b57-4c95-863f-589e8f36e4a1-run-httpd\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.632077 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-wlfpp" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.649136 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.653893 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.654217 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-config-data\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.665247 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g28mx\" (UniqueName: \"kubernetes.io/projected/28e1e383-f9b8-4d10-9503-71fed48f7777-kube-api-access-g28mx\") pod \"horizon-7468b88d47-6g4mq\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.671080 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.688161 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wskfl\" (UniqueName: \"kubernetes.io/projected/aec1692e-4b57-4c95-863f-589e8f36e4a1-kube-api-access-wskfl\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.688511 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fb9b786bc-jrbtx"] Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.720462 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-ldntm"] Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.721658 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-scripts\") pod \"ceilometer-0\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.724806 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-scripts\") pod \"placement-db-sync-ldntm\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.724876 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-config-data\") pod \"placement-db-sync-ldntm\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.724934 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-combined-ca-bundle\") pod \"placement-db-sync-ldntm\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.725016 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvdfp\" (UniqueName: \"kubernetes.io/projected/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-kube-api-access-gvdfp\") pod \"placement-db-sync-ldntm\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.725059 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-logs\") pod \"placement-db-sync-ldntm\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.725778 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-logs\") pod \"placement-db-sync-ldntm\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.738712 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-scripts\") pod \"placement-db-sync-ldntm\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.746392 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvdfp\" (UniqueName: \"kubernetes.io/projected/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-kube-api-access-gvdfp\") pod \"placement-db-sync-ldntm\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.747244 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-combined-ca-bundle\") pod \"placement-db-sync-ldntm\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.754112 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-config-data\") pod \"placement-db-sync-ldntm\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.783752 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-c9bc79ddc-g8sj2"] Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.794090 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.826164 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-dns-swift-storage-0\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.826213 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-config\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.826237 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-ovsdbserver-sb\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.826274 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-dns-svc\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.826334 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25lc7\" (UniqueName: \"kubernetes.io/projected/1ac23d66-3218-4159-9f23-87d2ab5078ed-kube-api-access-25lc7\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.826408 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-ovsdbserver-nb\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.865330 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c9bc79ddc-g8sj2"] Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.869546 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ldntm" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.877268 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.927885 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-config\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.927949 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-ovsdbserver-sb\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.928017 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-dns-svc\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.928083 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25lc7\" (UniqueName: \"kubernetes.io/projected/1ac23d66-3218-4159-9f23-87d2ab5078ed-kube-api-access-25lc7\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.928172 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-ovsdbserver-nb\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.928228 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-dns-swift-storage-0\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.929105 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-config\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.929522 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-dns-swift-storage-0\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.930214 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-dns-svc\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 
20:25:39.931042 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-ovsdbserver-nb\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.940680 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-ovsdbserver-sb\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.952729 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25lc7\" (UniqueName: \"kubernetes.io/projected/1ac23d66-3218-4159-9f23-87d2ab5078ed-kube-api-access-25lc7\") pod \"dnsmasq-dns-c9bc79ddc-g8sj2\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:39.965236 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:40.253951 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:40 crc kubenswrapper[4793]: W0127 20:25:40.634645 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod659dcf7f_fe0d_449e_99e0_f583f1df75b9.slice/crio-78ea0c2542a33b3d86dfbe8c9963dc7c9fa03fc5dd12c64c2121154195fd947a WatchSource:0}: Error finding container 78ea0c2542a33b3d86dfbe8c9963dc7c9fa03fc5dd12c64c2121154195fd947a: Status 404 returned error can't find the container with id 78ea0c2542a33b3d86dfbe8c9963dc7c9fa03fc5dd12c64c2121154195fd947a Jan 27 20:25:40 crc kubenswrapper[4793]: I0127 20:25:40.636202 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fb9b786bc-jrbtx"] Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.054989 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-cthwx"] Jan 27 20:25:41 crc kubenswrapper[4793]: W0127 20:25:41.063563 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ce6efd1_1d02_4e0c_bb44_3e2daac046bd.slice/crio-7cdbb2825f835a68f7995d57518b95f2093381377aa4e01edc7882bb557ce45a WatchSource:0}: Error finding container 7cdbb2825f835a68f7995d57518b95f2093381377aa4e01edc7882bb557ce45a: Status 404 returned error can't find the container with id 7cdbb2825f835a68f7995d57518b95f2093381377aa4e01edc7882bb557ce45a Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.108982 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-wlfpp"] Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.117652 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-799bb4b979-hxnqj"] Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.124717 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-6t92c"] Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.142144 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-ldntm"] Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 
20:25:41.180870 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.191238 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7468b88d47-6g4mq"] Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.242314 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7468b88d47-6g4mq" event={"ID":"28e1e383-f9b8-4d10-9503-71fed48f7777","Type":"ContainerStarted","Data":"65293e86f329ff150dd2721193e4febc8a99d147e07941e1221c9bca1343fb85"} Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.243315 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c9bc79ddc-g8sj2"] Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.245901 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ldntm" event={"ID":"cc67d37b-e18b-47bc-8328-f1f3145f9dc9","Type":"ContainerStarted","Data":"c62d47c31f81cd466ab8adeacb0997fee02f40e964df93307ada98991c7c9fc4"} Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.257865 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aec1692e-4b57-4c95-863f-589e8f36e4a1","Type":"ContainerStarted","Data":"41dbf848c4c3f99fd4dd08b025627e1339749b3664716eabb61e449cc5200f4d"} Jan 27 20:25:41 crc kubenswrapper[4793]: W0127 20:25:41.266785 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda07ca8f7_3387_4f58_a094_26d491028752.slice/crio-6e24ee1f0e9e9b4b0d47e713da4001e7242b399c62590670ce363cd4cdb068ec WatchSource:0}: Error finding container 6e24ee1f0e9e9b4b0d47e713da4001e7242b399c62590670ce363cd4cdb068ec: Status 404 returned error can't find the container with id 6e24ee1f0e9e9b4b0d47e713da4001e7242b399c62590670ce363cd4cdb068ec Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.270003 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-799bb4b979-hxnqj" event={"ID":"c74d74d5-edae-4bb5-a9be-3fc732d3de84","Type":"ContainerStarted","Data":"e6345f21144fd77d0acc7bed9a185f8eb3e5bf968d0c26c35c4f183f7f11aa16"} Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.272261 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-cql8q"] Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.274259 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-cthwx" event={"ID":"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd","Type":"ContainerStarted","Data":"7cdbb2825f835a68f7995d57518b95f2093381377aa4e01edc7882bb557ce45a"} Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.282065 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6t92c" event={"ID":"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2","Type":"ContainerStarted","Data":"d49d304fd8403c5eacf2c0c814159d5b980c0010f59cad1877b9f7dc18ebb379"} Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.295867 4793 generic.go:334] "Generic (PLEG): container finished" podID="659dcf7f-fe0d-449e-99e0-f583f1df75b9" containerID="0135ecd6f39cda93f94f90acd8016130a9af4edd6d14a11a2a21510f0611c6a8" exitCode=0 Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.295968 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx" event={"ID":"659dcf7f-fe0d-449e-99e0-f583f1df75b9","Type":"ContainerDied","Data":"0135ecd6f39cda93f94f90acd8016130a9af4edd6d14a11a2a21510f0611c6a8"} Jan 27 20:25:41 
crc kubenswrapper[4793]: I0127 20:25:41.296004 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx" event={"ID":"659dcf7f-fe0d-449e-99e0-f583f1df75b9","Type":"ContainerStarted","Data":"78ea0c2542a33b3d86dfbe8c9963dc7c9fa03fc5dd12c64c2121154195fd947a"} Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.302788 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-wlfpp" event={"ID":"64b480ab-f615-4d0b-9b56-ef6d0acf8955","Type":"ContainerStarted","Data":"5c19f9067c922241a4aa64fc432795cc88973be6f60782e1b5b1f25d22b537c4"} Jan 27 20:25:41 crc kubenswrapper[4793]: I0127 20:25:41.865929 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7468b88d47-6g4mq"] Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.078640 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-57dc8976b9-b8gcq"] Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.080565 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.167047 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-57dc8976b9-b8gcq"] Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.181889 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.212758 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efeab187-5a5f-4b7d-b05e-fe86588cea2f-scripts\") pod \"horizon-57dc8976b9-b8gcq\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.212801 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/efeab187-5a5f-4b7d-b05e-fe86588cea2f-config-data\") pod \"horizon-57dc8976b9-b8gcq\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.212854 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efeab187-5a5f-4b7d-b05e-fe86588cea2f-logs\") pod \"horizon-57dc8976b9-b8gcq\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.212914 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/efeab187-5a5f-4b7d-b05e-fe86588cea2f-horizon-secret-key\") pod \"horizon-57dc8976b9-b8gcq\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.212933 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5f2dr\" (UniqueName: \"kubernetes.io/projected/efeab187-5a5f-4b7d-b05e-fe86588cea2f-kube-api-access-5f2dr\") pod \"horizon-57dc8976b9-b8gcq\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.314884 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/efeab187-5a5f-4b7d-b05e-fe86588cea2f-horizon-secret-key\") pod \"horizon-57dc8976b9-b8gcq\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.314926 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5f2dr\" (UniqueName: \"kubernetes.io/projected/efeab187-5a5f-4b7d-b05e-fe86588cea2f-kube-api-access-5f2dr\") pod \"horizon-57dc8976b9-b8gcq\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.315032 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efeab187-5a5f-4b7d-b05e-fe86588cea2f-scripts\") pod \"horizon-57dc8976b9-b8gcq\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.315057 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/efeab187-5a5f-4b7d-b05e-fe86588cea2f-config-data\") pod \"horizon-57dc8976b9-b8gcq\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.315102 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efeab187-5a5f-4b7d-b05e-fe86588cea2f-logs\") pod \"horizon-57dc8976b9-b8gcq\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.315522 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efeab187-5a5f-4b7d-b05e-fe86588cea2f-logs\") pod \"horizon-57dc8976b9-b8gcq\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.316450 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efeab187-5a5f-4b7d-b05e-fe86588cea2f-scripts\") pod \"horizon-57dc8976b9-b8gcq\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.316894 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/efeab187-5a5f-4b7d-b05e-fe86588cea2f-config-data\") pod \"horizon-57dc8976b9-b8gcq\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.318369 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/efeab187-5a5f-4b7d-b05e-fe86588cea2f-horizon-secret-key\") pod \"horizon-57dc8976b9-b8gcq\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.335831 4793 generic.go:334] "Generic (PLEG): container finished" podID="1ac23d66-3218-4159-9f23-87d2ab5078ed" containerID="b17c32d87128299a2bd7c304d43ffcf410fe5dfaf76a6d05933f2a61f367f6aa" exitCode=0 Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.335931 4793 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" event={"ID":"1ac23d66-3218-4159-9f23-87d2ab5078ed","Type":"ContainerDied","Data":"b17c32d87128299a2bd7c304d43ffcf410fe5dfaf76a6d05933f2a61f367f6aa"} Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.335963 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" event={"ID":"1ac23d66-3218-4159-9f23-87d2ab5078ed","Type":"ContainerStarted","Data":"57b95ec1249f52bdf8d191b10fecb5310273ea8d140cd6611e95079ea7f0c775"} Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.339598 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5f2dr\" (UniqueName: \"kubernetes.io/projected/efeab187-5a5f-4b7d-b05e-fe86588cea2f-kube-api-access-5f2dr\") pod \"horizon-57dc8976b9-b8gcq\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.347220 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-cql8q" event={"ID":"a07ca8f7-3387-4f58-a094-26d491028752","Type":"ContainerStarted","Data":"6e24ee1f0e9e9b4b0d47e713da4001e7242b399c62590670ce363cd4cdb068ec"} Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.355536 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-cthwx" event={"ID":"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd","Type":"ContainerStarted","Data":"b57ae8118283dabfe483c25d17c0911a8fe523b9a5aa9409b7e40dc7832e2ab8"} Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.386819 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6t92c" event={"ID":"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2","Type":"ContainerStarted","Data":"f55e52b39dbb515faebaa957e9fc502d437a957b91a622ff5429fbe75873c24b"} Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.394599 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx" event={"ID":"659dcf7f-fe0d-449e-99e0-f583f1df75b9","Type":"ContainerDied","Data":"78ea0c2542a33b3d86dfbe8c9963dc7c9fa03fc5dd12c64c2121154195fd947a"} Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.394667 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="78ea0c2542a33b3d86dfbe8c9963dc7c9fa03fc5dd12c64c2121154195fd947a" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.399294 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-cthwx" podStartSLOduration=4.399277154 podStartE2EDuration="4.399277154s" podCreationTimestamp="2026-01-27 20:25:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:25:42.394990896 +0000 UTC m=+1367.785244062" watchObservedRunningTime="2026-01-27 20:25:42.399277154 +0000 UTC m=+1367.789530300" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.410262 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.418439 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-dns-swift-storage-0\") pod \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.418866 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-ovsdbserver-sb\") pod \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.418939 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-config\") pod \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.419297 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xktxv\" (UniqueName: \"kubernetes.io/projected/659dcf7f-fe0d-449e-99e0-f583f1df75b9-kube-api-access-xktxv\") pod \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.421893 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-ovsdbserver-nb\") pod \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.421971 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-dns-svc\") pod \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\" (UID: \"659dcf7f-fe0d-449e-99e0-f583f1df75b9\") " Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.427938 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-6t92c" podStartSLOduration=4.427917536 podStartE2EDuration="4.427917536s" podCreationTimestamp="2026-01-27 20:25:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:25:42.425029423 +0000 UTC m=+1367.815282579" watchObservedRunningTime="2026-01-27 20:25:42.427917536 +0000 UTC m=+1367.818170692" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.430415 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/659dcf7f-fe0d-449e-99e0-f583f1df75b9-kube-api-access-xktxv" (OuterVolumeSpecName: "kube-api-access-xktxv") pod "659dcf7f-fe0d-449e-99e0-f583f1df75b9" (UID: "659dcf7f-fe0d-449e-99e0-f583f1df75b9"). InnerVolumeSpecName "kube-api-access-xktxv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.451536 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.457212 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "659dcf7f-fe0d-449e-99e0-f583f1df75b9" (UID: "659dcf7f-fe0d-449e-99e0-f583f1df75b9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.457776 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "659dcf7f-fe0d-449e-99e0-f583f1df75b9" (UID: "659dcf7f-fe0d-449e-99e0-f583f1df75b9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.460777 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-config" (OuterVolumeSpecName: "config") pod "659dcf7f-fe0d-449e-99e0-f583f1df75b9" (UID: "659dcf7f-fe0d-449e-99e0-f583f1df75b9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.470235 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "659dcf7f-fe0d-449e-99e0-f583f1df75b9" (UID: "659dcf7f-fe0d-449e-99e0-f583f1df75b9"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.524232 4793 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.524269 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.524287 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.524307 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xktxv\" (UniqueName: \"kubernetes.io/projected/659dcf7f-fe0d-449e-99e0-f583f1df75b9-kube-api-access-xktxv\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.524329 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.649139 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "659dcf7f-fe0d-449e-99e0-f583f1df75b9" (UID: "659dcf7f-fe0d-449e-99e0-f583f1df75b9"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:42 crc kubenswrapper[4793]: I0127 20:25:42.734171 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/659dcf7f-fe0d-449e-99e0-f583f1df75b9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:43 crc kubenswrapper[4793]: I0127 20:25:43.449071 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fb9b786bc-jrbtx" Jan 27 20:25:43 crc kubenswrapper[4793]: I0127 20:25:43.449644 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" event={"ID":"1ac23d66-3218-4159-9f23-87d2ab5078ed","Type":"ContainerStarted","Data":"562aaf81315c39c43437775a7e79300ae359521ddd01223e9f707e04a06c06be"} Jan 27 20:25:43 crc kubenswrapper[4793]: I0127 20:25:43.515643 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fb9b786bc-jrbtx"] Jan 27 20:25:43 crc kubenswrapper[4793]: I0127 20:25:43.527334 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fb9b786bc-jrbtx"] Jan 27 20:25:43 crc kubenswrapper[4793]: I0127 20:25:43.561506 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-57dc8976b9-b8gcq"] Jan 27 20:25:43 crc kubenswrapper[4793]: I0127 20:25:43.849751 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="659dcf7f-fe0d-449e-99e0-f583f1df75b9" path="/var/lib/kubelet/pods/659dcf7f-fe0d-449e-99e0-f583f1df75b9/volumes" Jan 27 20:25:44 crc kubenswrapper[4793]: I0127 20:25:44.482478 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-57dc8976b9-b8gcq" event={"ID":"efeab187-5a5f-4b7d-b05e-fe86588cea2f","Type":"ContainerStarted","Data":"ed75d441f52c4868840191d3c6f5bdf82e71b537583e97974719f048e1cdd8d5"} Jan 27 20:25:44 crc kubenswrapper[4793]: I0127 20:25:44.498097 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-gds2t" event={"ID":"03131418-ea5d-47bd-906c-8a93c2712b1c","Type":"ContainerStarted","Data":"d2475a4da699bc85b2811da31a71ec1292a305fcfd89bf10807ca73a96f9deb7"} Jan 27 20:25:44 crc kubenswrapper[4793]: I0127 20:25:44.504350 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-mvcq4" event={"ID":"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941","Type":"ContainerStarted","Data":"2c324627880916785a1f5e7c9b641a070bad1fb19c4169dd1d63b48f7fb0d97a"} Jan 27 20:25:44 crc kubenswrapper[4793]: I0127 20:25:44.504515 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:44 crc kubenswrapper[4793]: I0127 20:25:44.518636 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-gds2t" podStartSLOduration=5.328576509 podStartE2EDuration="39.518616222s" podCreationTimestamp="2026-01-27 20:25:05 +0000 UTC" firstStartedPulling="2026-01-27 20:25:08.009127211 +0000 UTC m=+1333.399380367" lastFinishedPulling="2026-01-27 20:25:42.199166924 +0000 UTC m=+1367.589420080" observedRunningTime="2026-01-27 20:25:44.5177675 +0000 UTC m=+1369.908020666" watchObservedRunningTime="2026-01-27 20:25:44.518616222 +0000 UTC m=+1369.908869378" Jan 27 20:25:44 crc kubenswrapper[4793]: I0127 20:25:44.553058 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-db-sync-mvcq4" podStartSLOduration=3.767784718 podStartE2EDuration="39.553041469s" 
podCreationTimestamp="2026-01-27 20:25:05 +0000 UTC" firstStartedPulling="2026-01-27 20:25:07.205377158 +0000 UTC m=+1332.595630314" lastFinishedPulling="2026-01-27 20:25:42.990633909 +0000 UTC m=+1368.380887065" observedRunningTime="2026-01-27 20:25:44.541040127 +0000 UTC m=+1369.931293283" watchObservedRunningTime="2026-01-27 20:25:44.553041469 +0000 UTC m=+1369.943294625" Jan 27 20:25:44 crc kubenswrapper[4793]: I0127 20:25:44.573689 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" podStartSLOduration=5.573667598 podStartE2EDuration="5.573667598s" podCreationTimestamp="2026-01-27 20:25:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:25:44.569329619 +0000 UTC m=+1369.959582775" watchObservedRunningTime="2026-01-27 20:25:44.573667598 +0000 UTC m=+1369.963920744" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.740836 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-799bb4b979-hxnqj"] Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.837911 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-54fb8bbf88-42dqw"] Jan 27 20:25:48 crc kubenswrapper[4793]: E0127 20:25:48.838639 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="659dcf7f-fe0d-449e-99e0-f583f1df75b9" containerName="init" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.838654 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="659dcf7f-fe0d-449e-99e0-f583f1df75b9" containerName="init" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.838837 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="659dcf7f-fe0d-449e-99e0-f583f1df75b9" containerName="init" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.840226 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.852925 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.863977 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-horizon-secret-key\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.864047 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6sw2n\" (UniqueName: \"kubernetes.io/projected/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-kube-api-access-6sw2n\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.864077 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-scripts\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.864105 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-combined-ca-bundle\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.864146 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-logs\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.864171 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-config-data\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.864187 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-horizon-tls-certs\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.867107 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-54fb8bbf88-42dqw"] Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.925932 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-57dc8976b9-b8gcq"] Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.967339 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: 
\"kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-horizon-secret-key\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.967402 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6sw2n\" (UniqueName: \"kubernetes.io/projected/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-kube-api-access-6sw2n\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.967436 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-scripts\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.967462 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-combined-ca-bundle\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.967501 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-logs\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.967522 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-config-data\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.967555 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-horizon-tls-certs\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.970431 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-scripts\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.971528 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-logs\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.973086 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-config-data\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw" 
Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.982146 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-57fc549f96-h7nth"]
Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.983802 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.989794 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-horizon-secret-key\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw"
Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.992914 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-horizon-tls-certs\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw"
Jan 27 20:25:48 crc kubenswrapper[4793]: I0127 20:25:48.993047 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-combined-ca-bundle\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.011403 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-57fc549f96-h7nth"]
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.014370 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6sw2n\" (UniqueName: \"kubernetes.io/projected/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-kube-api-access-6sw2n\") pod \"horizon-54fb8bbf88-42dqw\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") " pod="openstack/horizon-54fb8bbf88-42dqw"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.293607 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-54fb8bbf88-42dqw"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.294430 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/598878f3-c1fc-481f-ad69-dacba44a1ccc-scripts\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.294570 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/598878f3-c1fc-481f-ad69-dacba44a1ccc-logs\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.294642 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/598878f3-c1fc-481f-ad69-dacba44a1ccc-horizon-secret-key\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.294706 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crlgq\" (UniqueName: \"kubernetes.io/projected/598878f3-c1fc-481f-ad69-dacba44a1ccc-kube-api-access-crlgq\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.294732 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/598878f3-c1fc-481f-ad69-dacba44a1ccc-config-data\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.294758 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/598878f3-c1fc-481f-ad69-dacba44a1ccc-combined-ca-bundle\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.294786 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/598878f3-c1fc-481f-ad69-dacba44a1ccc-horizon-tls-certs\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.396349 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/598878f3-c1fc-481f-ad69-dacba44a1ccc-scripts\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.396464 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/598878f3-c1fc-481f-ad69-dacba44a1ccc-logs\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.396527 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/598878f3-c1fc-481f-ad69-dacba44a1ccc-horizon-secret-key\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.396600 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crlgq\" (UniqueName: \"kubernetes.io/projected/598878f3-c1fc-481f-ad69-dacba44a1ccc-kube-api-access-crlgq\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.396628 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/598878f3-c1fc-481f-ad69-dacba44a1ccc-config-data\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.396721 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/598878f3-c1fc-481f-ad69-dacba44a1ccc-combined-ca-bundle\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.396750 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/598878f3-c1fc-481f-ad69-dacba44a1ccc-horizon-tls-certs\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.397531 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/598878f3-c1fc-481f-ad69-dacba44a1ccc-logs\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.398418 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/598878f3-c1fc-481f-ad69-dacba44a1ccc-scripts\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.399964 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/598878f3-c1fc-481f-ad69-dacba44a1ccc-config-data\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.406558 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/598878f3-c1fc-481f-ad69-dacba44a1ccc-horizon-secret-key\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth"
Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.406588 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for
volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/598878f3-c1fc-481f-ad69-dacba44a1ccc-horizon-tls-certs\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth" Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.411093 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/598878f3-c1fc-481f-ad69-dacba44a1ccc-combined-ca-bundle\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth" Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.424095 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crlgq\" (UniqueName: \"kubernetes.io/projected/598878f3-c1fc-481f-ad69-dacba44a1ccc-kube-api-access-crlgq\") pod \"horizon-57fc549f96-h7nth\" (UID: \"598878f3-c1fc-481f-ad69-dacba44a1ccc\") " pod="openstack/horizon-57fc549f96-h7nth" Jan 27 20:25:49 crc kubenswrapper[4793]: I0127 20:25:49.695213 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-57fc549f96-h7nth" Jan 27 20:25:50 crc kubenswrapper[4793]: I0127 20:25:50.259027 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:25:50 crc kubenswrapper[4793]: I0127 20:25:50.343928 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76976dd897-x6b4b"] Jan 27 20:25:50 crc kubenswrapper[4793]: I0127 20:25:50.344200 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-76976dd897-x6b4b" podUID="2c367af8-d76d-4cbc-8fa4-729babd56421" containerName="dnsmasq-dns" containerID="cri-o://3601776dffcf1c7db9e0cdc9e08315be1bce4aca1486b21b5ea839d8aee0fe1c" gracePeriod=10 Jan 27 20:25:50 crc kubenswrapper[4793]: I0127 20:25:50.603269 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-54fb8bbf88-42dqw"] Jan 27 20:25:50 crc kubenswrapper[4793]: I0127 20:25:50.644348 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-57fc549f96-h7nth"] Jan 27 20:25:51 crc kubenswrapper[4793]: I0127 20:25:51.152642 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-54fb8bbf88-42dqw" event={"ID":"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a","Type":"ContainerStarted","Data":"653a93051b6bbb036135bc48cf90dde05e2f6820135c1d0306e084e4592954a9"} Jan 27 20:25:51 crc kubenswrapper[4793]: I0127 20:25:51.155408 4793 generic.go:334] "Generic (PLEG): container finished" podID="2c367af8-d76d-4cbc-8fa4-729babd56421" containerID="3601776dffcf1c7db9e0cdc9e08315be1bce4aca1486b21b5ea839d8aee0fe1c" exitCode=0 Jan 27 20:25:51 crc kubenswrapper[4793]: I0127 20:25:51.155441 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76976dd897-x6b4b" event={"ID":"2c367af8-d76d-4cbc-8fa4-729babd56421","Type":"ContainerDied","Data":"3601776dffcf1c7db9e0cdc9e08315be1bce4aca1486b21b5ea839d8aee0fe1c"} Jan 27 20:25:51 crc kubenswrapper[4793]: I0127 20:25:51.196529 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-76976dd897-x6b4b" podUID="2c367af8-d76d-4cbc-8fa4-729babd56421" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.137:5353: connect: connection refused" Jan 27 20:25:56 crc kubenswrapper[4793]: I0127 20:25:56.958456 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:25:56 crc kubenswrapper[4793]: I0127 20:25:56.984023 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-dns-swift-storage-0\") pod \"2c367af8-d76d-4cbc-8fa4-729babd56421\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " Jan 27 20:25:56 crc kubenswrapper[4793]: I0127 20:25:56.984147 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-ovsdbserver-sb\") pod \"2c367af8-d76d-4cbc-8fa4-729babd56421\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " Jan 27 20:25:56 crc kubenswrapper[4793]: I0127 20:25:56.984193 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-ovsdbserver-nb\") pod \"2c367af8-d76d-4cbc-8fa4-729babd56421\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " Jan 27 20:25:56 crc kubenswrapper[4793]: I0127 20:25:56.984248 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvpx2\" (UniqueName: \"kubernetes.io/projected/2c367af8-d76d-4cbc-8fa4-729babd56421-kube-api-access-zvpx2\") pod \"2c367af8-d76d-4cbc-8fa4-729babd56421\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " Jan 27 20:25:56 crc kubenswrapper[4793]: I0127 20:25:56.984277 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-config\") pod \"2c367af8-d76d-4cbc-8fa4-729babd56421\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " Jan 27 20:25:56 crc kubenswrapper[4793]: I0127 20:25:56.984377 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-dns-svc\") pod \"2c367af8-d76d-4cbc-8fa4-729babd56421\" (UID: \"2c367af8-d76d-4cbc-8fa4-729babd56421\") " Jan 27 20:25:56 crc kubenswrapper[4793]: I0127 20:25:56.991011 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c367af8-d76d-4cbc-8fa4-729babd56421-kube-api-access-zvpx2" (OuterVolumeSpecName: "kube-api-access-zvpx2") pod "2c367af8-d76d-4cbc-8fa4-729babd56421" (UID: "2c367af8-d76d-4cbc-8fa4-729babd56421"). InnerVolumeSpecName "kube-api-access-zvpx2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.033441 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-config" (OuterVolumeSpecName: "config") pod "2c367af8-d76d-4cbc-8fa4-729babd56421" (UID: "2c367af8-d76d-4cbc-8fa4-729babd56421"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.050958 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2c367af8-d76d-4cbc-8fa4-729babd56421" (UID: "2c367af8-d76d-4cbc-8fa4-729babd56421"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.053193 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2c367af8-d76d-4cbc-8fa4-729babd56421" (UID: "2c367af8-d76d-4cbc-8fa4-729babd56421"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.072425 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2c367af8-d76d-4cbc-8fa4-729babd56421" (UID: "2c367af8-d76d-4cbc-8fa4-729babd56421"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.074485 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2c367af8-d76d-4cbc-8fa4-729babd56421" (UID: "2c367af8-d76d-4cbc-8fa4-729babd56421"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.086673 4793 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.086710 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.086722 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.086733 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvpx2\" (UniqueName: \"kubernetes.io/projected/2c367af8-d76d-4cbc-8fa4-729babd56421-kube-api-access-zvpx2\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.086748 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.086760 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c367af8-d76d-4cbc-8fa4-729babd56421-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.334483 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76976dd897-x6b4b" event={"ID":"2c367af8-d76d-4cbc-8fa4-729babd56421","Type":"ContainerDied","Data":"340c28069a12214055208c774e1383bcca4d51d0c7d36907140fe80ff240ab8a"} Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.334520 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76976dd897-x6b4b" Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.334646 4793 scope.go:117] "RemoveContainer" containerID="3601776dffcf1c7db9e0cdc9e08315be1bce4aca1486b21b5ea839d8aee0fe1c" Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.335600 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-57fc549f96-h7nth" event={"ID":"598878f3-c1fc-481f-ad69-dacba44a1ccc","Type":"ContainerStarted","Data":"378e8fb3efdea88316ef2694cb4069844a10acf2ea37d4442c82039f97afeef6"} Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.379495 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76976dd897-x6b4b"] Jan 27 20:25:57 crc kubenswrapper[4793]: I0127 20:25:57.388428 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-76976dd897-x6b4b"] Jan 27 20:25:58 crc kubenswrapper[4793]: I0127 20:25:58.965908 4793 patch_prober.go:28] interesting pod/downloads-7954f5f757-mq8nr container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.41:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 20:25:58 crc kubenswrapper[4793]: I0127 20:25:58.965980 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-mq8nr" podUID="a043f31e-8e0a-41eb-a2ad-73f6d5795b0a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.41:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:25:58 crc kubenswrapper[4793]: I0127 20:25:58.993035 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c367af8-d76d-4cbc-8fa4-729babd56421" path="/var/lib/kubelet/pods/2c367af8-d76d-4cbc-8fa4-729babd56421/volumes" Jan 27 20:25:58 crc kubenswrapper[4793]: E0127 20:25:58.994206 4793 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.192s" Jan 27 20:25:59 crc kubenswrapper[4793]: I0127 20:25:59.997779 4793 generic.go:334] "Generic (PLEG): container finished" podID="517cc4b5-c76f-401c-be7b-dcb82ef2a3f2" containerID="f55e52b39dbb515faebaa957e9fc502d437a957b91a622ff5429fbe75873c24b" exitCode=0 Jan 27 20:25:59 crc kubenswrapper[4793]: I0127 20:25:59.997822 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6t92c" event={"ID":"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2","Type":"ContainerDied","Data":"f55e52b39dbb515faebaa957e9fc502d437a957b91a622ff5429fbe75873c24b"} Jan 27 20:26:01 crc kubenswrapper[4793]: I0127 20:26:01.196725 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-76976dd897-x6b4b" podUID="2c367af8-d76d-4cbc-8fa4-729babd56421" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.137:5353: i/o timeout" Jan 27 20:26:01 crc kubenswrapper[4793]: I0127 20:26:01.216181 4793 generic.go:334] "Generic (PLEG): container finished" podID="7a01af79-2bf8-40d8-aa3b-5ea2df7b6941" containerID="2c324627880916785a1f5e7c9b641a070bad1fb19c4169dd1d63b48f7fb0d97a" exitCode=0 Jan 27 20:26:01 crc kubenswrapper[4793]: I0127 20:26:01.216209 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-mvcq4" event={"ID":"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941","Type":"ContainerDied","Data":"2c324627880916785a1f5e7c9b641a070bad1fb19c4169dd1d63b48f7fb0d97a"} Jan 27 20:26:01 crc kubenswrapper[4793]: E0127 
20:26:01.879500 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-placement-api:watcher_latest" Jan 27 20:26:01 crc kubenswrapper[4793]: E0127 20:26:01.880048 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-placement-api:watcher_latest" Jan 27 20:26:01 crc kubenswrapper[4793]: E0127 20:26:01.880251 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:38.102.83.195:5001/podified-master-centos10/openstack-placement-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gvdfp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-ldntm_openstack(cc67d37b-e18b-47bc-8328-f1f3145f9dc9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:26:01 crc kubenswrapper[4793]: E0127 20:26:01.881921 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-ldntm" podUID="cc67d37b-e18b-47bc-8328-f1f3145f9dc9" Jan 27 20:26:02 crc kubenswrapper[4793]: E0127 20:26:02.231065 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"38.102.83.195:5001/podified-master-centos10/openstack-placement-api:watcher_latest\\\"\"" pod="openstack/placement-db-sync-ldntm" podUID="cc67d37b-e18b-47bc-8328-f1f3145f9dc9" Jan 27 20:26:07 crc kubenswrapper[4793]: I0127 20:26:07.746687 4793 scope.go:117] "RemoveContainer" containerID="218a0f7a5f4d724886237bfaa1b750528812e01fcead661dc7e635bd3a2aeb47" Jan 27 20:26:07 crc kubenswrapper[4793]: E0127 20:26:07.756021 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest" Jan 27 20:26:07 crc kubenswrapper[4793]: E0127 20:26:07.756081 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest" Jan 27 20:26:07 crc kubenswrapper[4793]: E0127 20:26:07.756209 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8hb9hc8h679h77h568h559h679h597h59fh88h66dh8dh687h5f9hb9h545h678h64fh648h8bh667h7dh86h577h55fh677h676h5c6hc8h54dh5b6q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5f2dr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-57dc8976b9-b8gcq_openstack(efeab187-5a5f-4b7d-b05e-fe86588cea2f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:26:07 crc kubenswrapper[4793]: E0127 20:26:07.757737 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image 
\\\"38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest\\\"\"]" pod="openstack/horizon-57dc8976b9-b8gcq" podUID="efeab187-5a5f-4b7d-b05e-fe86588cea2f" Jan 27 20:26:09 crc kubenswrapper[4793]: I0127 20:26:09.861426 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-6t92c" Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.017287 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vbb49\" (UniqueName: \"kubernetes.io/projected/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-kube-api-access-vbb49\") pod \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.017359 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-combined-ca-bundle\") pod \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.017530 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-credential-keys\") pod \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.017584 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-fernet-keys\") pod \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.017662 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-config-data\") pod \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.017707 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-scripts\") pod \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\" (UID: \"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2\") " Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.024457 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-kube-api-access-vbb49" (OuterVolumeSpecName: "kube-api-access-vbb49") pod "517cc4b5-c76f-401c-be7b-dcb82ef2a3f2" (UID: "517cc4b5-c76f-401c-be7b-dcb82ef2a3f2"). InnerVolumeSpecName "kube-api-access-vbb49". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.024762 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "517cc4b5-c76f-401c-be7b-dcb82ef2a3f2" (UID: "517cc4b5-c76f-401c-be7b-dcb82ef2a3f2"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.031698 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-scripts" (OuterVolumeSpecName: "scripts") pod "517cc4b5-c76f-401c-be7b-dcb82ef2a3f2" (UID: "517cc4b5-c76f-401c-be7b-dcb82ef2a3f2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.031786 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "517cc4b5-c76f-401c-be7b-dcb82ef2a3f2" (UID: "517cc4b5-c76f-401c-be7b-dcb82ef2a3f2"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.051768 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "517cc4b5-c76f-401c-be7b-dcb82ef2a3f2" (UID: "517cc4b5-c76f-401c-be7b-dcb82ef2a3f2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.053236 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-config-data" (OuterVolumeSpecName: "config-data") pod "517cc4b5-c76f-401c-be7b-dcb82ef2a3f2" (UID: "517cc4b5-c76f-401c-be7b-dcb82ef2a3f2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:10 crc kubenswrapper[4793]: E0127 20:26:10.059397 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest" Jan 27 20:26:10 crc kubenswrapper[4793]: E0127 20:26:10.059466 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest" Jan 27 20:26:10 crc kubenswrapper[4793]: E0127 20:26:10.059636 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5cfh55h5fchb8h5d4h58ch574hb7h5c5h568h659h94h645h695h5d8h667hfchbch66h74h5bh68bh68ch587h5ddh669h58fh77h56fh594h58hb4q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2qnt7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-799bb4b979-hxnqj_openstack(c74d74d5-edae-4bb5-a9be-3fc732d3de84): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:26:10 crc kubenswrapper[4793]: E0127 20:26:10.064920 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest\\\"\"]" pod="openstack/horizon-799bb4b979-hxnqj" podUID="c74d74d5-edae-4bb5-a9be-3fc732d3de84" Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.119735 4793 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.120009 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.120018 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.120027 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vbb49\" (UniqueName: \"kubernetes.io/projected/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-kube-api-access-vbb49\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:10 crc 
kubenswrapper[4793]: I0127 20:26:10.120036 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.120044 4793 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:10 crc kubenswrapper[4793]: E0127 20:26:10.147054 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest" Jan 27 20:26:10 crc kubenswrapper[4793]: E0127 20:26:10.147141 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest" Jan 27 20:26:10 crc kubenswrapper[4793]: E0127 20:26:10.147298 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n64ch5ffhc8h585hf9hffhddh89h4h65h594hf5h5dfh9ch78h65dh599h665h5h66dhd6h54bhbfh85h575h666h649hb5h8ch59ch665h674q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g28mx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-7468b88d47-6g4mq_openstack(28e1e383-f9b8-4d10-9503-71fed48f7777): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:26:10 crc kubenswrapper[4793]: E0127 20:26:10.151675 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", 
failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest\\\"\"]" pod="openstack/horizon-7468b88d47-6g4mq" podUID="28e1e383-f9b8-4d10-9503-71fed48f7777" Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.305341 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6t92c" event={"ID":"517cc4b5-c76f-401c-be7b-dcb82ef2a3f2","Type":"ContainerDied","Data":"d49d304fd8403c5eacf2c0c814159d5b980c0010f59cad1877b9f7dc18ebb379"} Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.305386 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d49d304fd8403c5eacf2c0c814159d5b980c0010f59cad1877b9f7dc18ebb379" Jan 27 20:26:10 crc kubenswrapper[4793]: I0127 20:26:10.305504 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-6t92c" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.070312 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-6t92c"] Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.078344 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-6t92c"] Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.170518 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-dbdlw"] Jan 27 20:26:11 crc kubenswrapper[4793]: E0127 20:26:11.171004 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="517cc4b5-c76f-401c-be7b-dcb82ef2a3f2" containerName="keystone-bootstrap" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.171027 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="517cc4b5-c76f-401c-be7b-dcb82ef2a3f2" containerName="keystone-bootstrap" Jan 27 20:26:11 crc kubenswrapper[4793]: E0127 20:26:11.171039 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c367af8-d76d-4cbc-8fa4-729babd56421" containerName="dnsmasq-dns" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.171046 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c367af8-d76d-4cbc-8fa4-729babd56421" containerName="dnsmasq-dns" Jan 27 20:26:11 crc kubenswrapper[4793]: E0127 20:26:11.171056 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c367af8-d76d-4cbc-8fa4-729babd56421" containerName="init" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.171064 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c367af8-d76d-4cbc-8fa4-729babd56421" containerName="init" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.171269 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c367af8-d76d-4cbc-8fa4-729babd56421" containerName="dnsmasq-dns" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.171300 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="517cc4b5-c76f-401c-be7b-dcb82ef2a3f2" containerName="keystone-bootstrap" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.172032 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.174282 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.177928 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.178244 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.178409 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-5strr" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.182899 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.185924 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-dbdlw"] Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.302173 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-config-data\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.302219 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-combined-ca-bundle\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.302947 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-fernet-keys\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.302999 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-credential-keys\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.303107 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-scripts\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.303247 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdnlk\" (UniqueName: \"kubernetes.io/projected/889b7838-f7be-4969-a167-9ff1b6ce04ef-kube-api-access-qdnlk\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.405150 4793 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-qdnlk\" (UniqueName: \"kubernetes.io/projected/889b7838-f7be-4969-a167-9ff1b6ce04ef-kube-api-access-qdnlk\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.405485 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-config-data\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.405510 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-combined-ca-bundle\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.405585 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-fernet-keys\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.405603 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-credential-keys\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.405639 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-scripts\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.411388 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-scripts\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.411771 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-config-data\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.413065 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-combined-ca-bundle\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.413656 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-fernet-keys\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") 
" pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.414818 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-credential-keys\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.423999 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdnlk\" (UniqueName: \"kubernetes.io/projected/889b7838-f7be-4969-a167-9ff1b6ce04ef-kube-api-access-qdnlk\") pod \"keystone-bootstrap-dbdlw\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: E0127 20:26:11.432522 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-ceilometer-central:watcher_latest" Jan 27 20:26:11 crc kubenswrapper[4793]: E0127 20:26:11.432617 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-ceilometer-central:watcher_latest" Jan 27 20:26:11 crc kubenswrapper[4793]: E0127 20:26:11.432847 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:38.102.83.195:5001/podified-master-centos10/openstack-ceilometer-central:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n56bh54h9h75h594h545h5dch545h5fh54h684h675h64h85h668h59dh57ch7chbdh56bh658h545h8ch66bh557h675h669h675h64h55bh5b7h5f5q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wskfl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(aec1692e-4b57-4c95-863f-589e8f36e4a1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.490636 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:11 crc kubenswrapper[4793]: I0127 20:26:11.817101 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="517cc4b5-c76f-401c-be7b-dcb82ef2a3f2" path="/var/lib/kubelet/pods/517cc4b5-c76f-401c-be7b-dcb82ef2a3f2/volumes" Jan 27 20:26:22 crc kubenswrapper[4793]: I0127 20:26:22.753461 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:26:22 crc kubenswrapper[4793]: I0127 20:26:22.754066 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:26:26 crc kubenswrapper[4793]: E0127 20:26:26.291700 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest" Jan 27 20:26:26 crc kubenswrapper[4793]: E0127 20:26:26.292197 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest" Jan 27 20:26:26 crc kubenswrapper[4793]: E0127 20:26:26.292362 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nbbh96h5d6h645h675h58h58ch5bbh8fh684h5dfh75hcbh686h585h567h5dfh79h89h67fh576h66h548h84h66h6fh584h55fh5cchfh66ch58q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-crlgq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-57fc549f96-h7nth_openstack(598878f3-c1fc-481f-ad69-dacba44a1ccc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:26:26 crc kubenswrapper[4793]: E0127 20:26:26.296321 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest\\\"\"]" pod="openstack/horizon-57fc549f96-h7nth" podUID="598878f3-c1fc-481f-ad69-dacba44a1ccc" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.396772 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-mvcq4" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.402319 4793 util.go:48] "No ready sandbox for pod can be found. 
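The pull failures above repeat one underlying event at three layers: the CRI client (log.go), the image manager (kuberuntime_image.go), and the pod sync loop (kuberuntime_manager.go) all report the same "rpc error: code = Canceled desc = copying config: context canceled" for images from 38.102.83.195:5001 before the kubelet degrades the pods to ImagePullBackOff. A minimal sketch (not part of the log) for tallying such failures per image from a kubelet log in this format; the local path is an assumption:

import re
from collections import Counter

# Matches the CRI-layer failure entries above, e.g.:
#   ... log.go:32] "PullImage from image service failed" err="..." image="38.102.83.195:5001/..."
PULL_ERR = re.compile(r'"PullImage from image service failed".*image="([^"]+)"')

def count_pull_failures(path="kubelet.log"):  # hypothetical path
    counts = Counter()
    with open(path, encoding="utf-8", errors="replace") as f:
        for line in f:
            m = PULL_ERR.search(line)
            if m:
                counts[m.group(1)] += 1
    return counts

if __name__ == "__main__":
    for image, n in count_pull_failures().most_common():
        print(f"{n:3d}  {image}")

Applied to this excerpt it would report the ceilometer-central, horizon, barbican-api, and cinder-api images, each failing with the identical canceled-copy error.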
Need to start a new one" pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.524583 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/efeab187-5a5f-4b7d-b05e-fe86588cea2f-config-data\") pod \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.524648 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efeab187-5a5f-4b7d-b05e-fe86588cea2f-logs\") pod \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.524689 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-combined-ca-bundle\") pod \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\" (UID: \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\") " Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.524759 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5f2dr\" (UniqueName: \"kubernetes.io/projected/efeab187-5a5f-4b7d-b05e-fe86588cea2f-kube-api-access-5f2dr\") pod \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.524800 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/efeab187-5a5f-4b7d-b05e-fe86588cea2f-horizon-secret-key\") pod \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.524875 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efeab187-5a5f-4b7d-b05e-fe86588cea2f-scripts\") pod \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\" (UID: \"efeab187-5a5f-4b7d-b05e-fe86588cea2f\") " Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.524907 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-db-sync-config-data\") pod \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\" (UID: \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\") " Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.524982 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d79sn\" (UniqueName: \"kubernetes.io/projected/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-kube-api-access-d79sn\") pod \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\" (UID: \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\") " Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.525016 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-config-data\") pod \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\" (UID: \"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941\") " Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.525691 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/efeab187-5a5f-4b7d-b05e-fe86588cea2f-scripts" (OuterVolumeSpecName: "scripts") pod "efeab187-5a5f-4b7d-b05e-fe86588cea2f" (UID: 
"efeab187-5a5f-4b7d-b05e-fe86588cea2f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.525788 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/efeab187-5a5f-4b7d-b05e-fe86588cea2f-config-data" (OuterVolumeSpecName: "config-data") pod "efeab187-5a5f-4b7d-b05e-fe86588cea2f" (UID: "efeab187-5a5f-4b7d-b05e-fe86588cea2f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.526084 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/efeab187-5a5f-4b7d-b05e-fe86588cea2f-logs" (OuterVolumeSpecName: "logs") pod "efeab187-5a5f-4b7d-b05e-fe86588cea2f" (UID: "efeab187-5a5f-4b7d-b05e-fe86588cea2f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.533757 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efeab187-5a5f-4b7d-b05e-fe86588cea2f-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "efeab187-5a5f-4b7d-b05e-fe86588cea2f" (UID: "efeab187-5a5f-4b7d-b05e-fe86588cea2f"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.533772 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-kube-api-access-d79sn" (OuterVolumeSpecName: "kube-api-access-d79sn") pod "7a01af79-2bf8-40d8-aa3b-5ea2df7b6941" (UID: "7a01af79-2bf8-40d8-aa3b-5ea2df7b6941"). InnerVolumeSpecName "kube-api-access-d79sn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.535074 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "7a01af79-2bf8-40d8-aa3b-5ea2df7b6941" (UID: "7a01af79-2bf8-40d8-aa3b-5ea2df7b6941"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.537348 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efeab187-5a5f-4b7d-b05e-fe86588cea2f-kube-api-access-5f2dr" (OuterVolumeSpecName: "kube-api-access-5f2dr") pod "efeab187-5a5f-4b7d-b05e-fe86588cea2f" (UID: "efeab187-5a5f-4b7d-b05e-fe86588cea2f"). InnerVolumeSpecName "kube-api-access-5f2dr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.558140 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7a01af79-2bf8-40d8-aa3b-5ea2df7b6941" (UID: "7a01af79-2bf8-40d8-aa3b-5ea2df7b6941"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.585795 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-config-data" (OuterVolumeSpecName: "config-data") pod "7a01af79-2bf8-40d8-aa3b-5ea2df7b6941" (UID: "7a01af79-2bf8-40d8-aa3b-5ea2df7b6941"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.626968 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/efeab187-5a5f-4b7d-b05e-fe86588cea2f-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.627008 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/efeab187-5a5f-4b7d-b05e-fe86588cea2f-logs\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.627019 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.627033 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5f2dr\" (UniqueName: \"kubernetes.io/projected/efeab187-5a5f-4b7d-b05e-fe86588cea2f-kube-api-access-5f2dr\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.627042 4793 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/efeab187-5a5f-4b7d-b05e-fe86588cea2f-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.627050 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/efeab187-5a5f-4b7d-b05e-fe86588cea2f-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.627058 4793 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.627067 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d79sn\" (UniqueName: \"kubernetes.io/projected/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-kube-api-access-d79sn\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.627078 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.632665 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-mvcq4" event={"ID":"7a01af79-2bf8-40d8-aa3b-5ea2df7b6941","Type":"ContainerDied","Data":"259f6ae9c96e28fdb25e140f07ce8cdc790fccf074af55427c583ef983607a3b"} Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.632715 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="259f6ae9c96e28fdb25e140f07ce8cdc790fccf074af55427c583ef983607a3b" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.632679 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-mvcq4" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.646494 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-57dc8976b9-b8gcq" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.650082 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-57dc8976b9-b8gcq" event={"ID":"efeab187-5a5f-4b7d-b05e-fe86588cea2f","Type":"ContainerDied","Data":"ed75d441f52c4868840191d3c6f5bdf82e71b537583e97974719f048e1cdd8d5"} Jan 27 20:26:26 crc kubenswrapper[4793]: E0127 20:26:26.654636 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest\\\"\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.195:5001/podified-master-centos10/openstack-horizon:watcher_latest\\\"\"]" pod="openstack/horizon-57fc549f96-h7nth" podUID="598878f3-c1fc-481f-ad69-dacba44a1ccc" Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.726438 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-57dc8976b9-b8gcq"] Jan 27 20:26:26 crc kubenswrapper[4793]: I0127 20:26:26.735427 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-57dc8976b9-b8gcq"] Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.077303 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.083223 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.143297 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/28e1e383-f9b8-4d10-9503-71fed48f7777-horizon-secret-key\") pod \"28e1e383-f9b8-4d10-9503-71fed48f7777\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.143467 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/28e1e383-f9b8-4d10-9503-71fed48f7777-scripts\") pod \"28e1e383-f9b8-4d10-9503-71fed48f7777\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.144057 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28e1e383-f9b8-4d10-9503-71fed48f7777-scripts" (OuterVolumeSpecName: "scripts") pod "28e1e383-f9b8-4d10-9503-71fed48f7777" (UID: "28e1e383-f9b8-4d10-9503-71fed48f7777"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.144147 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28e1e383-f9b8-4d10-9503-71fed48f7777-logs\") pod \"28e1e383-f9b8-4d10-9503-71fed48f7777\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.144478 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28e1e383-f9b8-4d10-9503-71fed48f7777-logs" (OuterVolumeSpecName: "logs") pod "28e1e383-f9b8-4d10-9503-71fed48f7777" (UID: "28e1e383-f9b8-4d10-9503-71fed48f7777"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.144587 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/28e1e383-f9b8-4d10-9503-71fed48f7777-config-data\") pod \"28e1e383-f9b8-4d10-9503-71fed48f7777\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.144731 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g28mx\" (UniqueName: \"kubernetes.io/projected/28e1e383-f9b8-4d10-9503-71fed48f7777-kube-api-access-g28mx\") pod \"28e1e383-f9b8-4d10-9503-71fed48f7777\" (UID: \"28e1e383-f9b8-4d10-9503-71fed48f7777\") " Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.145442 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28e1e383-f9b8-4d10-9503-71fed48f7777-logs\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.145467 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/28e1e383-f9b8-4d10-9503-71fed48f7777-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.146368 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28e1e383-f9b8-4d10-9503-71fed48f7777-config-data" (OuterVolumeSpecName: "config-data") pod "28e1e383-f9b8-4d10-9503-71fed48f7777" (UID: "28e1e383-f9b8-4d10-9503-71fed48f7777"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.146888 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28e1e383-f9b8-4d10-9503-71fed48f7777-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "28e1e383-f9b8-4d10-9503-71fed48f7777" (UID: "28e1e383-f9b8-4d10-9503-71fed48f7777"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.246355 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c74d74d5-edae-4bb5-a9be-3fc732d3de84-logs\") pod \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.246481 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c74d74d5-edae-4bb5-a9be-3fc732d3de84-config-data\") pod \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.246763 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c74d74d5-edae-4bb5-a9be-3fc732d3de84-horizon-secret-key\") pod \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.246785 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c74d74d5-edae-4bb5-a9be-3fc732d3de84-logs" (OuterVolumeSpecName: "logs") pod "c74d74d5-edae-4bb5-a9be-3fc732d3de84" (UID: "c74d74d5-edae-4bb5-a9be-3fc732d3de84"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.246985 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c74d74d5-edae-4bb5-a9be-3fc732d3de84-scripts\") pod \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.247434 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qnt7\" (UniqueName: \"kubernetes.io/projected/c74d74d5-edae-4bb5-a9be-3fc732d3de84-kube-api-access-2qnt7\") pod \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\" (UID: \"c74d74d5-edae-4bb5-a9be-3fc732d3de84\") " Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.247835 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c74d74d5-edae-4bb5-a9be-3fc732d3de84-config-data" (OuterVolumeSpecName: "config-data") pod "c74d74d5-edae-4bb5-a9be-3fc732d3de84" (UID: "c74d74d5-edae-4bb5-a9be-3fc732d3de84"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.248444 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/28e1e383-f9b8-4d10-9503-71fed48f7777-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.248468 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c74d74d5-edae-4bb5-a9be-3fc732d3de84-logs\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.248479 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c74d74d5-edae-4bb5-a9be-3fc732d3de84-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.248490 4793 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/28e1e383-f9b8-4d10-9503-71fed48f7777-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.272831 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c74d74d5-edae-4bb5-a9be-3fc732d3de84-scripts" (OuterVolumeSpecName: "scripts") pod "c74d74d5-edae-4bb5-a9be-3fc732d3de84" (UID: "c74d74d5-edae-4bb5-a9be-3fc732d3de84"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.281957 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c74d74d5-edae-4bb5-a9be-3fc732d3de84-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "c74d74d5-edae-4bb5-a9be-3fc732d3de84" (UID: "c74d74d5-edae-4bb5-a9be-3fc732d3de84"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.290053 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c74d74d5-edae-4bb5-a9be-3fc732d3de84-kube-api-access-2qnt7" (OuterVolumeSpecName: "kube-api-access-2qnt7") pod "c74d74d5-edae-4bb5-a9be-3fc732d3de84" (UID: "c74d74d5-edae-4bb5-a9be-3fc732d3de84"). InnerVolumeSpecName "kube-api-access-2qnt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.290581 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28e1e383-f9b8-4d10-9503-71fed48f7777-kube-api-access-g28mx" (OuterVolumeSpecName: "kube-api-access-g28mx") pod "28e1e383-f9b8-4d10-9503-71fed48f7777" (UID: "28e1e383-f9b8-4d10-9503-71fed48f7777"). InnerVolumeSpecName "kube-api-access-g28mx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.349900 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c74d74d5-edae-4bb5-a9be-3fc732d3de84-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.349943 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qnt7\" (UniqueName: \"kubernetes.io/projected/c74d74d5-edae-4bb5-a9be-3fc732d3de84-kube-api-access-2qnt7\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.349960 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g28mx\" (UniqueName: \"kubernetes.io/projected/28e1e383-f9b8-4d10-9503-71fed48f7777-kube-api-access-g28mx\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.349978 4793 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c74d74d5-edae-4bb5-a9be-3fc732d3de84-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:27 crc kubenswrapper[4793]: E0127 20:26:27.369761 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-barbican-api:watcher_latest" Jan 27 20:26:27 crc kubenswrapper[4793]: E0127 20:26:27.369818 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-barbican-api:watcher_latest" Jan 27 20:26:27 crc kubenswrapper[4793]: E0127 20:26:27.369930 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:38.102.83.195:5001/podified-master-centos10/openstack-barbican-api:watcher_latest,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xgtxm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
barbican-db-sync-wlfpp_openstack(64b480ab-f615-4d0b-9b56-ef6d0acf8955): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:26:27 crc kubenswrapper[4793]: E0127 20:26:27.371250 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-wlfpp" podUID="64b480ab-f615-4d0b-9b56-ef6d0acf8955" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.657875 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-799bb4b979-hxnqj" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.657864 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-799bb4b979-hxnqj" event={"ID":"c74d74d5-edae-4bb5-a9be-3fc732d3de84","Type":"ContainerDied","Data":"e6345f21144fd77d0acc7bed9a185f8eb3e5bf968d0c26c35c4f183f7f11aa16"} Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.663177 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7468b88d47-6g4mq" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.677036 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7468b88d47-6g4mq" event={"ID":"28e1e383-f9b8-4d10-9503-71fed48f7777","Type":"ContainerDied","Data":"65293e86f329ff150dd2721193e4febc8a99d147e07941e1221c9bca1343fb85"} Jan 27 20:26:27 crc kubenswrapper[4793]: E0127 20:26:27.679871 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.195:5001/podified-master-centos10/openstack-barbican-api:watcher_latest\\\"\"" pod="openstack/barbican-db-sync-wlfpp" podUID="64b480ab-f615-4d0b-9b56-ef6d0acf8955" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.707301 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 27 20:26:27 crc kubenswrapper[4793]: E0127 20:26:27.707791 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a01af79-2bf8-40d8-aa3b-5ea2df7b6941" containerName="watcher-db-sync" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.707816 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a01af79-2bf8-40d8-aa3b-5ea2df7b6941" containerName="watcher-db-sync" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.708061 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a01af79-2bf8-40d8-aa3b-5ea2df7b6941" containerName="watcher-db-sync" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.708821 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.712765 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-8fk8d" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.713007 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.746810 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.824732 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efeab187-5a5f-4b7d-b05e-fe86588cea2f" path="/var/lib/kubelet/pods/efeab187-5a5f-4b7d-b05e-fe86588cea2f/volumes" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.829081 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.831121 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.834262 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.853163 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7468b88d47-6g4mq"] Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.858811 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-config-data\") pod \"watcher-decision-engine-0\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") " pod="openstack/watcher-decision-engine-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.858871 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") " pod="openstack/watcher-decision-engine-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.859427 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ffc46614-5f6d-40ad-a388-1ff326d22ee6-logs\") pod \"watcher-decision-engine-0\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") " pod="openstack/watcher-decision-engine-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.859469 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") " pod="openstack/watcher-decision-engine-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.859641 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzw87\" (UniqueName: \"kubernetes.io/projected/ffc46614-5f6d-40ad-a388-1ff326d22ee6-kube-api-access-tzw87\") pod \"watcher-decision-engine-0\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") " pod="openstack/watcher-decision-engine-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.865648 4793 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-7468b88d47-6g4mq"] Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.907994 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-applier-0"] Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.909309 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.911211 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.921492 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.948065 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.956727 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-799bb4b979-hxnqj"] Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.961737 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-config-data\") pod \"watcher-api-0\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " pod="openstack/watcher-api-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.961784 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ffc46614-5f6d-40ad-a388-1ff326d22ee6-logs\") pod \"watcher-decision-engine-0\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") " pod="openstack/watcher-decision-engine-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.961807 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") " pod="openstack/watcher-decision-engine-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.961846 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjgkw\" (UniqueName: \"kubernetes.io/projected/ec364c41-f8df-4a06-83b8-959161dedc13-kube-api-access-bjgkw\") pod \"watcher-api-0\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " pod="openstack/watcher-api-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.961868 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec364c41-f8df-4a06-83b8-959161dedc13-logs\") pod \"watcher-api-0\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " pod="openstack/watcher-api-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.961888 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzw87\" (UniqueName: \"kubernetes.io/projected/ffc46614-5f6d-40ad-a388-1ff326d22ee6-kube-api-access-tzw87\") pod \"watcher-decision-engine-0\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") " pod="openstack/watcher-decision-engine-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.961982 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: 
\"kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " pod="openstack/watcher-api-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.962274 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ffc46614-5f6d-40ad-a388-1ff326d22ee6-logs\") pod \"watcher-decision-engine-0\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") " pod="openstack/watcher-decision-engine-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.963269 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-config-data\") pod \"watcher-decision-engine-0\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") " pod="openstack/watcher-decision-engine-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.963307 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") " pod="openstack/watcher-decision-engine-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.963345 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " pod="openstack/watcher-api-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.967146 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-799bb4b979-hxnqj"] Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.970705 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-config-data\") pod \"watcher-decision-engine-0\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") " pod="openstack/watcher-decision-engine-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.970932 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") " pod="openstack/watcher-decision-engine-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.971603 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") " pod="openstack/watcher-decision-engine-0" Jan 27 20:26:27 crc kubenswrapper[4793]: I0127 20:26:27.980189 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzw87\" (UniqueName: \"kubernetes.io/projected/ffc46614-5f6d-40ad-a388-1ff326d22ee6-kube-api-access-tzw87\") pod \"watcher-decision-engine-0\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") " pod="openstack/watcher-decision-engine-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.055220 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.068434 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-config-data\") pod \"watcher-api-0\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " pod="openstack/watcher-api-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.068504 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjgkw\" (UniqueName: \"kubernetes.io/projected/ec364c41-f8df-4a06-83b8-959161dedc13-kube-api-access-bjgkw\") pod \"watcher-api-0\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " pod="openstack/watcher-api-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.068537 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec364c41-f8df-4a06-83b8-959161dedc13-logs\") pod \"watcher-api-0\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " pod="openstack/watcher-api-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.068657 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/045591bb-dd8c-437e-9cf8-0e0b520fc49d-config-data\") pod \"watcher-applier-0\" (UID: \"045591bb-dd8c-437e-9cf8-0e0b520fc49d\") " pod="openstack/watcher-applier-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.068707 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " pod="openstack/watcher-api-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.068778 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/045591bb-dd8c-437e-9cf8-0e0b520fc49d-logs\") pod \"watcher-applier-0\" (UID: \"045591bb-dd8c-437e-9cf8-0e0b520fc49d\") " pod="openstack/watcher-applier-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.068818 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " pod="openstack/watcher-api-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.068843 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/045591bb-dd8c-437e-9cf8-0e0b520fc49d-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"045591bb-dd8c-437e-9cf8-0e0b520fc49d\") " pod="openstack/watcher-applier-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.068866 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jv9s7\" (UniqueName: \"kubernetes.io/projected/045591bb-dd8c-437e-9cf8-0e0b520fc49d-kube-api-access-jv9s7\") pod \"watcher-applier-0\" (UID: \"045591bb-dd8c-437e-9cf8-0e0b520fc49d\") " pod="openstack/watcher-applier-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.070030 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/ec364c41-f8df-4a06-83b8-959161dedc13-logs\") pod \"watcher-api-0\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " pod="openstack/watcher-api-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.072323 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-config-data\") pod \"watcher-api-0\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " pod="openstack/watcher-api-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.073373 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " pod="openstack/watcher-api-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.076024 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " pod="openstack/watcher-api-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.088701 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjgkw\" (UniqueName: \"kubernetes.io/projected/ec364c41-f8df-4a06-83b8-959161dedc13-kube-api-access-bjgkw\") pod \"watcher-api-0\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " pod="openstack/watcher-api-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.160174 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.170028 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/045591bb-dd8c-437e-9cf8-0e0b520fc49d-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"045591bb-dd8c-437e-9cf8-0e0b520fc49d\") " pod="openstack/watcher-applier-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.170283 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jv9s7\" (UniqueName: \"kubernetes.io/projected/045591bb-dd8c-437e-9cf8-0e0b520fc49d-kube-api-access-jv9s7\") pod \"watcher-applier-0\" (UID: \"045591bb-dd8c-437e-9cf8-0e0b520fc49d\") " pod="openstack/watcher-applier-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.170835 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/045591bb-dd8c-437e-9cf8-0e0b520fc49d-config-data\") pod \"watcher-applier-0\" (UID: \"045591bb-dd8c-437e-9cf8-0e0b520fc49d\") " pod="openstack/watcher-applier-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.171033 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/045591bb-dd8c-437e-9cf8-0e0b520fc49d-logs\") pod \"watcher-applier-0\" (UID: \"045591bb-dd8c-437e-9cf8-0e0b520fc49d\") " pod="openstack/watcher-applier-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.171627 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/045591bb-dd8c-437e-9cf8-0e0b520fc49d-logs\") pod \"watcher-applier-0\" (UID: \"045591bb-dd8c-437e-9cf8-0e0b520fc49d\") " 
pod="openstack/watcher-applier-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.174252 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/045591bb-dd8c-437e-9cf8-0e0b520fc49d-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"045591bb-dd8c-437e-9cf8-0e0b520fc49d\") " pod="openstack/watcher-applier-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.176912 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/045591bb-dd8c-437e-9cf8-0e0b520fc49d-config-data\") pod \"watcher-applier-0\" (UID: \"045591bb-dd8c-437e-9cf8-0e0b520fc49d\") " pod="openstack/watcher-applier-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.197336 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jv9s7\" (UniqueName: \"kubernetes.io/projected/045591bb-dd8c-437e-9cf8-0e0b520fc49d-kube-api-access-jv9s7\") pod \"watcher-applier-0\" (UID: \"045591bb-dd8c-437e-9cf8-0e0b520fc49d\") " pod="openstack/watcher-applier-0" Jan 27 20:26:28 crc kubenswrapper[4793]: I0127 20:26:28.241934 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Jan 27 20:26:29 crc kubenswrapper[4793]: W0127 20:26:29.831465 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podec364c41_f8df_4a06_83b8_959161dedc13.slice/crio-d7410c2b162cde2994c27f4e4ab2a8d0cb0c154ade5e160b3fcf200de589fdc2 WatchSource:0}: Error finding container d7410c2b162cde2994c27f4e4ab2a8d0cb0c154ade5e160b3fcf200de589fdc2: Status 404 returned error can't find the container with id d7410c2b162cde2994c27f4e4ab2a8d0cb0c154ade5e160b3fcf200de589fdc2 Jan 27 20:26:29 crc kubenswrapper[4793]: I0127 20:26:29.866840 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28e1e383-f9b8-4d10-9503-71fed48f7777" path="/var/lib/kubelet/pods/28e1e383-f9b8-4d10-9503-71fed48f7777/volumes" Jan 27 20:26:29 crc kubenswrapper[4793]: I0127 20:26:29.870802 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c74d74d5-edae-4bb5-a9be-3fc732d3de84" path="/var/lib/kubelet/pods/c74d74d5-edae-4bb5-a9be-3fc732d3de84/volumes" Jan 27 20:26:29 crc kubenswrapper[4793]: I0127 20:26:29.871471 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Jan 27 20:26:29 crc kubenswrapper[4793]: I0127 20:26:29.875676 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-dbdlw"] Jan 27 20:26:29 crc kubenswrapper[4793]: E0127 20:26:29.932205 4793 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-cinder-api:watcher_latest" Jan 27 20:26:29 crc kubenswrapper[4793]: E0127 20:26:29.932470 4793 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.195:5001/podified-master-centos10/openstack-cinder-api:watcher_latest" Jan 27 20:26:29 crc kubenswrapper[4793]: E0127 20:26:29.932617 4793 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:38.102.83.195:5001/podified-master-centos10/openstack-cinder-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pvc7f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-cql8q_openstack(a07ca8f7-3387-4f58-a094-26d491028752): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 20:26:29 crc kubenswrapper[4793]: E0127 20:26:29.933765 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-cql8q" podUID="a07ca8f7-3387-4f58-a094-26d491028752" Jan 27 20:26:29 crc kubenswrapper[4793]: I0127 20:26:29.965034 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Jan 27 20:26:29 crc kubenswrapper[4793]: I0127 20:26:29.979904 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 27 20:26:30 crc kubenswrapper[4793]: W0127 20:26:30.329117 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod045591bb_dd8c_437e_9cf8_0e0b520fc49d.slice/crio-8cb5bc0bac88f9479c6111e93635b6c8d03f9090b74b037c49db4082220578b7 WatchSource:0}: Error finding container 8cb5bc0bac88f9479c6111e93635b6c8d03f9090b74b037c49db4082220578b7: Status 404 returned error can't find the container with id 
8cb5bc0bac88f9479c6111e93635b6c8d03f9090b74b037c49db4082220578b7 Jan 27 20:26:30 crc kubenswrapper[4793]: W0127 20:26:30.332066 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podffc46614_5f6d_40ad_a388_1ff326d22ee6.slice/crio-d58b45698ec3b203a68b847f573b5a69ed4db504290a5c4507f43ea577d2b3b9 WatchSource:0}: Error finding container d58b45698ec3b203a68b847f573b5a69ed4db504290a5c4507f43ea577d2b3b9: Status 404 returned error can't find the container with id d58b45698ec3b203a68b847f573b5a69ed4db504290a5c4507f43ea577d2b3b9 Jan 27 20:26:30 crc kubenswrapper[4793]: I0127 20:26:30.388573 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 20:26:30 crc kubenswrapper[4793]: I0127 20:26:30.714824 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-54fb8bbf88-42dqw" event={"ID":"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a","Type":"ContainerStarted","Data":"98563a4eb2a696935e83a51b8bd4a46c15a10a9210e8057d81a360e02c721d48"} Jan 27 20:26:30 crc kubenswrapper[4793]: I0127 20:26:30.715198 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-54fb8bbf88-42dqw" event={"ID":"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a","Type":"ContainerStarted","Data":"c3f58fb8d65ad0d715698cf7349fbd870ec40a6b9c8106589ae11c0ed0c8328e"} Jan 27 20:26:30 crc kubenswrapper[4793]: I0127 20:26:30.718233 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"ec364c41-f8df-4a06-83b8-959161dedc13","Type":"ContainerStarted","Data":"6dc02795c4c7637cdcc6137486ed51c90024c153925a04dbeb938f88b6d3f423"} Jan 27 20:26:30 crc kubenswrapper[4793]: I0127 20:26:30.718296 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"ec364c41-f8df-4a06-83b8-959161dedc13","Type":"ContainerStarted","Data":"d7410c2b162cde2994c27f4e4ab2a8d0cb0c154ade5e160b3fcf200de589fdc2"} Jan 27 20:26:30 crc kubenswrapper[4793]: I0127 20:26:30.720463 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dbdlw" event={"ID":"889b7838-f7be-4969-a167-9ff1b6ce04ef","Type":"ContainerStarted","Data":"4ca38b74a3e18d4a88f92129263b6a2af7e1b488760f5e51943c8f3a6dcbe30d"} Jan 27 20:26:30 crc kubenswrapper[4793]: I0127 20:26:30.720518 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dbdlw" event={"ID":"889b7838-f7be-4969-a167-9ff1b6ce04ef","Type":"ContainerStarted","Data":"5ac37f7bbae0c63ff992700f41b621e2d99bbd2042fb615e4f4a2b0f4f0b86ee"} Jan 27 20:26:30 crc kubenswrapper[4793]: I0127 20:26:30.723672 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"8cb5bc0bac88f9479c6111e93635b6c8d03f9090b74b037c49db4082220578b7"} Jan 27 20:26:30 crc kubenswrapper[4793]: I0127 20:26:30.725318 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ldntm" event={"ID":"cc67d37b-e18b-47bc-8328-f1f3145f9dc9","Type":"ContainerStarted","Data":"c86439cf944a06eaa2f91514107ad288144f532ea1940bd584201638b93861dd"} Jan 27 20:26:30 crc kubenswrapper[4793]: I0127 20:26:30.727895 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aec1692e-4b57-4c95-863f-589e8f36e4a1","Type":"ContainerStarted","Data":"171f806f43a9c519891fb1a87d398d8a4ec7313a80c4dcaff2917449857ae3ca"} Jan 27 20:26:30 crc 
kubenswrapper[4793]: I0127 20:26:30.733061 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"ffc46614-5f6d-40ad-a388-1ff326d22ee6","Type":"ContainerStarted","Data":"d58b45698ec3b203a68b847f573b5a69ed4db504290a5c4507f43ea577d2b3b9"}
Jan 27 20:26:30 crc kubenswrapper[4793]: E0127 20:26:30.734856 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.195:5001/podified-master-centos10/openstack-cinder-api:watcher_latest\\\"\"" pod="openstack/cinder-db-sync-cql8q" podUID="a07ca8f7-3387-4f58-a094-26d491028752"
Jan 27 20:26:30 crc kubenswrapper[4793]: I0127 20:26:30.742851 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-54fb8bbf88-42dqw" podStartSLOduration=4.249154783 podStartE2EDuration="42.74282832s" podCreationTimestamp="2026-01-27 20:25:48 +0000 UTC" firstStartedPulling="2026-01-27 20:25:50.612283667 +0000 UTC m=+1376.002536823" lastFinishedPulling="2026-01-27 20:26:29.105957204 +0000 UTC m=+1414.496210360" observedRunningTime="2026-01-27 20:26:30.738535892 +0000 UTC m=+1416.128789068" watchObservedRunningTime="2026-01-27 20:26:30.74282832 +0000 UTC m=+1416.133081486"
Jan 27 20:26:30 crc kubenswrapper[4793]: I0127 20:26:30.768818 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-ldntm" podStartSLOduration=2.504901208 podStartE2EDuration="51.768795303s" podCreationTimestamp="2026-01-27 20:25:39 +0000 UTC" firstStartedPulling="2026-01-27 20:25:41.174578739 +0000 UTC m=+1366.564831895" lastFinishedPulling="2026-01-27 20:26:30.438472834 +0000 UTC m=+1415.828725990" observedRunningTime="2026-01-27 20:26:30.764774022 +0000 UTC m=+1416.155027178" watchObservedRunningTime="2026-01-27 20:26:30.768795303 +0000 UTC m=+1416.159048459"
Jan 27 20:26:30 crc kubenswrapper[4793]: I0127 20:26:30.813366 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-dbdlw" podStartSLOduration=19.813320115 podStartE2EDuration="19.813320115s" podCreationTimestamp="2026-01-27 20:26:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:26:30.811038797 +0000 UTC m=+1416.201291953" watchObservedRunningTime="2026-01-27 20:26:30.813320115 +0000 UTC m=+1416.203573271"
Jan 27 20:26:31 crc kubenswrapper[4793]: I0127 20:26:31.752291 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"ec364c41-f8df-4a06-83b8-959161dedc13","Type":"ContainerStarted","Data":"ece23fb8bf5c4e7f8fd90aeba198b768d12734e9b5aa28e6fa7396c92f0948ee"}
Jan 27 20:26:31 crc kubenswrapper[4793]: I0127 20:26:31.752627 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0"
Jan 27 20:26:31 crc kubenswrapper[4793]: I0127 20:26:31.781935 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=4.78191282 podStartE2EDuration="4.78191282s" podCreationTimestamp="2026-01-27 20:26:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:26:31.780141006 +0000 UTC m=+1417.170394162" watchObservedRunningTime="2026-01-27 20:26:31.78191282 +0000 UTC m=+1417.172165976"
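
Each "Observed pod startup duration" entry above is internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling), which is why horizon-54fb8bbf88-42dqw shows 42.74s end to end but only 4.25s of SLO-relevant startup. A minimal Go sketch of the arithmetic, checked against the horizon values; the helper and its field names are illustrative, not kubelet code, and which of the two observed timestamps feeds the subtraction is inferred from the numbers:

package main

import (
	"fmt"
	"time"
)

// startupDurations reproduces the arithmetic implied by the
// "Observed pod startup duration" fields above; an illustrative
// helper, not a kubelet type.
func startupDurations(created, firstPull, lastPull, observed time.Time) (e2e, slo time.Duration) {
	e2e = observed.Sub(created)         // podStartE2EDuration
	slo = e2e - lastPull.Sub(firstPull) // podStartSLOduration excludes the image-pull window
	return
}

func main() {
	parse := func(s string) time.Time {
		t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
		if err != nil {
			panic(err)
		}
		return t
	}
	// Values for horizon-54fb8bbf88-42dqw, taken from the entry above.
	e2e, slo := startupDurations(
		parse("2026-01-27 20:25:48 +0000 UTC"),           // podCreationTimestamp
		parse("2026-01-27 20:25:50.612283667 +0000 UTC"), // firstStartedPulling
		parse("2026-01-27 20:26:29.105957204 +0000 UTC"), // lastFinishedPulling
		parse("2026-01-27 20:26:30.74282832 +0000 UTC"),  // watchObservedRunningTime
	)
	fmt.Println(e2e, slo) // 42.74282832s 4.249154783s, matching the logged values
}
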
Jan 27 20:26:32 crc kubenswrapper[4793]: I0127 20:26:32.760183 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"ffc46614-5f6d-40ad-a388-1ff326d22ee6","Type":"ContainerStarted","Data":"e161bd49e2620c5e94ec1676c61e7c7d3c5c15b3f5a6d064f7ef87e6ed797451"}
Jan 27 20:26:32 crc kubenswrapper[4793]: I0127 20:26:32.764081 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"9ce5db71cb2575820736cbf159d7a38974ec67942cb1decabc18705459a26743"}
Jan 27 20:26:32 crc kubenswrapper[4793]: I0127 20:26:32.785364 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=4.319641487 podStartE2EDuration="5.785348263s" podCreationTimestamp="2026-01-27 20:26:27 +0000 UTC" firstStartedPulling="2026-01-27 20:26:30.391009398 +0000 UTC m=+1415.781262554" lastFinishedPulling="2026-01-27 20:26:31.856716174 +0000 UTC m=+1417.246969330" observedRunningTime="2026-01-27 20:26:32.777913345 +0000 UTC m=+1418.168166501" watchObservedRunningTime="2026-01-27 20:26:32.785348263 +0000 UTC m=+1418.175601419"
Jan 27 20:26:32 crc kubenswrapper[4793]: I0127 20:26:32.806232 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-applier-0" podStartSLOduration=4.339933828 podStartE2EDuration="5.806214388s" podCreationTimestamp="2026-01-27 20:26:27 +0000 UTC" firstStartedPulling="2026-01-27 20:26:30.388315081 +0000 UTC m=+1415.778568237" lastFinishedPulling="2026-01-27 20:26:31.854595641 +0000 UTC m=+1417.244848797" observedRunningTime="2026-01-27 20:26:32.800081494 +0000 UTC m=+1418.190334650" watchObservedRunningTime="2026-01-27 20:26:32.806214388 +0000 UTC m=+1418.196467534"
Jan 27 20:26:33 crc kubenswrapper[4793]: I0127 20:26:33.160692 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0"
Jan 27 20:26:33 crc kubenswrapper[4793]: I0127 20:26:33.242872 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 27 20:26:33 crc kubenswrapper[4793]: I0127 20:26:33.771367 4793 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 27 20:26:34 crc kubenswrapper[4793]: I0127 20:26:34.957419 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0"
Jan 27 20:26:35 crc kubenswrapper[4793]: I0127 20:26:35.793598 4793 generic.go:334] "Generic (PLEG): container finished" podID="cc67d37b-e18b-47bc-8328-f1f3145f9dc9" containerID="c86439cf944a06eaa2f91514107ad288144f532ea1940bd584201638b93861dd" exitCode=0
Jan 27 20:26:35 crc kubenswrapper[4793]: I0127 20:26:35.793685 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ldntm" event={"ID":"cc67d37b-e18b-47bc-8328-f1f3145f9dc9","Type":"ContainerDied","Data":"c86439cf944a06eaa2f91514107ad288144f532ea1940bd584201638b93861dd"}
Jan 27 20:26:36 crc kubenswrapper[4793]: I0127 20:26:36.804385 4793 generic.go:334] "Generic (PLEG): container finished" podID="889b7838-f7be-4969-a167-9ff1b6ce04ef" containerID="4ca38b74a3e18d4a88f92129263b6a2af7e1b488760f5e51943c8f3a6dcbe30d" exitCode=0
Jan 27 20:26:36 crc kubenswrapper[4793]: I0127 20:26:36.804477 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dbdlw" event={"ID":"889b7838-f7be-4969-a167-9ff1b6ce04ef","Type":"ContainerDied","Data":"4ca38b74a3e18d4a88f92129263b6a2af7e1b488760f5e51943c8f3a6dcbe30d"}
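
The generic.go:334 / kubelet.go:2453 pairs above are the relisting PLEG (pod lifecycle event generator) at work: the kubelet periodically relists containers from the runtime, diffs the new snapshot against the previous one, and feeds ContainerStarted/ContainerDied events (with exit codes) into the sync loop. A rough sketch of that diffing idea, under a deliberately simplified two-state model rather than the kubelet's real pleg package:

package main

import "fmt"

// LifecycleEvent loosely mirrors the event names in the log
// ("ContainerStarted", "ContainerDied"); illustrative only.
type LifecycleEvent struct {
	PodID, ContainerID, Type string
}

// relistDiff derives lifecycle events from two runtime snapshots
// (containerID -> running), the way a relisting PLEG would.
func relistDiff(podID string, prev, cur map[string]bool) []LifecycleEvent {
	var events []LifecycleEvent
	for id, running := range cur {
		switch {
		case running && !prev[id]:
			events = append(events, LifecycleEvent{podID, id, "ContainerStarted"})
		case !running && prev[id]:
			events = append(events, LifecycleEvent{podID, id, "ContainerDied"})
		}
	}
	return events
}

func main() {
	prev := map[string]bool{"c86439cf944a": true}
	cur := map[string]bool{"c86439cf944a": false} // the db-sync container exited
	for _, e := range relistDiff("cc67d37b-e18b-47bc-8328-f1f3145f9dc9", prev, cur) {
		fmt.Printf("SyncLoop (PLEG): event for pod %s: %s %s\n", e.PodID, e.Type, e.ContainerID)
	}
}
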
event={"ID":"889b7838-f7be-4969-a167-9ff1b6ce04ef","Type":"ContainerDied","Data":"4ca38b74a3e18d4a88f92129263b6a2af7e1b488760f5e51943c8f3a6dcbe30d"} Jan 27 20:26:36 crc kubenswrapper[4793]: I0127 20:26:36.806833 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="9ce5db71cb2575820736cbf159d7a38974ec67942cb1decabc18705459a26743" exitCode=1 Jan 27 20:26:36 crc kubenswrapper[4793]: I0127 20:26:36.806880 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"9ce5db71cb2575820736cbf159d7a38974ec67942cb1decabc18705459a26743"} Jan 27 20:26:36 crc kubenswrapper[4793]: I0127 20:26:36.807204 4793 scope.go:117] "RemoveContainer" containerID="9ce5db71cb2575820736cbf159d7a38974ec67942cb1decabc18705459a26743" Jan 27 20:26:36 crc kubenswrapper[4793]: I0127 20:26:36.810488 4793 generic.go:334] "Generic (PLEG): container finished" podID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerID="e161bd49e2620c5e94ec1676c61e7c7d3c5c15b3f5a6d064f7ef87e6ed797451" exitCode=1 Jan 27 20:26:36 crc kubenswrapper[4793]: I0127 20:26:36.810720 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"ffc46614-5f6d-40ad-a388-1ff326d22ee6","Type":"ContainerDied","Data":"e161bd49e2620c5e94ec1676c61e7c7d3c5c15b3f5a6d064f7ef87e6ed797451"} Jan 27 20:26:36 crc kubenswrapper[4793]: I0127 20:26:36.811200 4793 scope.go:117] "RemoveContainer" containerID="e161bd49e2620c5e94ec1676c61e7c7d3c5c15b3f5a6d064f7ef87e6ed797451" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.257284 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ldntm" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.408958 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-logs\") pod \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.409024 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-scripts\") pod \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.409063 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvdfp\" (UniqueName: \"kubernetes.io/projected/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-kube-api-access-gvdfp\") pod \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.409086 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-config-data\") pod \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") " Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.409154 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-combined-ca-bundle\") pod \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\" (UID: \"cc67d37b-e18b-47bc-8328-f1f3145f9dc9\") 
" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.409505 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-logs" (OuterVolumeSpecName: "logs") pod "cc67d37b-e18b-47bc-8328-f1f3145f9dc9" (UID: "cc67d37b-e18b-47bc-8328-f1f3145f9dc9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.409853 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-logs\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.415958 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-scripts" (OuterVolumeSpecName: "scripts") pod "cc67d37b-e18b-47bc-8328-f1f3145f9dc9" (UID: "cc67d37b-e18b-47bc-8328-f1f3145f9dc9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.416202 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-kube-api-access-gvdfp" (OuterVolumeSpecName: "kube-api-access-gvdfp") pod "cc67d37b-e18b-47bc-8328-f1f3145f9dc9" (UID: "cc67d37b-e18b-47bc-8328-f1f3145f9dc9"). InnerVolumeSpecName "kube-api-access-gvdfp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.440064 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-config-data" (OuterVolumeSpecName: "config-data") pod "cc67d37b-e18b-47bc-8328-f1f3145f9dc9" (UID: "cc67d37b-e18b-47bc-8328-f1f3145f9dc9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.446653 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc67d37b-e18b-47bc-8328-f1f3145f9dc9" (UID: "cc67d37b-e18b-47bc-8328-f1f3145f9dc9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.511653 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.511870 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvdfp\" (UniqueName: \"kubernetes.io/projected/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-kube-api-access-gvdfp\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.511951 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.512011 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc67d37b-e18b-47bc-8328-f1f3145f9dc9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.821781 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"5c33cdbef124884a6a9d4c47d9f48fb1775d0ce10f32393960f0040865c191ca"} Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.824315 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ldntm" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.824643 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ldntm" event={"ID":"cc67d37b-e18b-47bc-8328-f1f3145f9dc9","Type":"ContainerDied","Data":"c62d47c31f81cd466ab8adeacb0997fee02f40e964df93307ada98991c7c9fc4"} Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.824681 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c62d47c31f81cd466ab8adeacb0997fee02f40e964df93307ada98991c7c9fc4" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.826752 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aec1692e-4b57-4c95-863f-589e8f36e4a1","Type":"ContainerStarted","Data":"ddc3ebb1921f3d6d9ae0e98f38b88a4d329efb259009ff3f475c17fe7d1fd149"} Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.841751 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"ffc46614-5f6d-40ad-a388-1ff326d22ee6","Type":"ContainerStarted","Data":"b333c04d88dda9d3fc2438453ff950914a7e834343103e2beb967bc73e514926"} Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.847497 4793 generic.go:334] "Generic (PLEG): container finished" podID="03131418-ea5d-47bd-906c-8a93c2712b1c" containerID="d2475a4da699bc85b2811da31a71ec1292a305fcfd89bf10807ca73a96f9deb7" exitCode=0 Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.847704 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-gds2t" event={"ID":"03131418-ea5d-47bd-906c-8a93c2712b1c","Type":"ContainerDied","Data":"d2475a4da699bc85b2811da31a71ec1292a305fcfd89bf10807ca73a96f9deb7"} Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.961160 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-988699dd4-wjjzw"] Jan 27 20:26:37 crc kubenswrapper[4793]: E0127 20:26:37.967811 4793 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="cc67d37b-e18b-47bc-8328-f1f3145f9dc9" containerName="placement-db-sync" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.967851 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc67d37b-e18b-47bc-8328-f1f3145f9dc9" containerName="placement-db-sync" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.968185 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc67d37b-e18b-47bc-8328-f1f3145f9dc9" containerName="placement-db-sync" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.969183 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.973110 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-988699dd4-wjjzw"] Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.975748 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.976026 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.976199 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.976322 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 27 20:26:37 crc kubenswrapper[4793]: I0127 20:26:37.976831 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-w5n2h" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.056298 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.098061 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.128714 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98e27e40-e02e-41a8-8935-f29b264435a7-config-data\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.128846 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e27e40-e02e-41a8-8935-f29b264435a7-public-tls-certs\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.128882 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5st86\" (UniqueName: \"kubernetes.io/projected/98e27e40-e02e-41a8-8935-f29b264435a7-kube-api-access-5st86\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.128965 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/98e27e40-e02e-41a8-8935-f29b264435a7-combined-ca-bundle\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.129231 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/98e27e40-e02e-41a8-8935-f29b264435a7-logs\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.129304 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e27e40-e02e-41a8-8935-f29b264435a7-internal-tls-certs\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.129340 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98e27e40-e02e-41a8-8935-f29b264435a7-scripts\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.161045 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.178247 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.230651 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/98e27e40-e02e-41a8-8935-f29b264435a7-logs\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.230712 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e27e40-e02e-41a8-8935-f29b264435a7-internal-tls-certs\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.230735 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98e27e40-e02e-41a8-8935-f29b264435a7-scripts\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.230789 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98e27e40-e02e-41a8-8935-f29b264435a7-config-data\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.230835 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e27e40-e02e-41a8-8935-f29b264435a7-public-tls-certs\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " 
pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.230858 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5st86\" (UniqueName: \"kubernetes.io/projected/98e27e40-e02e-41a8-8935-f29b264435a7-kube-api-access-5st86\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.230909 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98e27e40-e02e-41a8-8935-f29b264435a7-combined-ca-bundle\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.232106 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/98e27e40-e02e-41a8-8935-f29b264435a7-logs\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.236250 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98e27e40-e02e-41a8-8935-f29b264435a7-scripts\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.237050 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98e27e40-e02e-41a8-8935-f29b264435a7-config-data\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.238130 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e27e40-e02e-41a8-8935-f29b264435a7-internal-tls-certs\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.239326 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98e27e40-e02e-41a8-8935-f29b264435a7-combined-ca-bundle\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.248793 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.248839 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.255080 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e27e40-e02e-41a8-8935-f29b264435a7-public-tls-certs\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.261001 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5st86\" 
(UniqueName: \"kubernetes.io/projected/98e27e40-e02e-41a8-8935-f29b264435a7-kube-api-access-5st86\") pod \"placement-988699dd4-wjjzw\" (UID: \"98e27e40-e02e-41a8-8935-f29b264435a7\") " pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.283331 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.290350 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.363512 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.545946 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-credential-keys\") pod \"889b7838-f7be-4969-a167-9ff1b6ce04ef\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.546338 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-config-data\") pod \"889b7838-f7be-4969-a167-9ff1b6ce04ef\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.546436 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-scripts\") pod \"889b7838-f7be-4969-a167-9ff1b6ce04ef\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.546491 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-fernet-keys\") pod \"889b7838-f7be-4969-a167-9ff1b6ce04ef\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.546531 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-combined-ca-bundle\") pod \"889b7838-f7be-4969-a167-9ff1b6ce04ef\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.546665 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qdnlk\" (UniqueName: \"kubernetes.io/projected/889b7838-f7be-4969-a167-9ff1b6ce04ef-kube-api-access-qdnlk\") pod \"889b7838-f7be-4969-a167-9ff1b6ce04ef\" (UID: \"889b7838-f7be-4969-a167-9ff1b6ce04ef\") " Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.552603 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "889b7838-f7be-4969-a167-9ff1b6ce04ef" (UID: "889b7838-f7be-4969-a167-9ff1b6ce04ef"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.552962 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/889b7838-f7be-4969-a167-9ff1b6ce04ef-kube-api-access-qdnlk" (OuterVolumeSpecName: "kube-api-access-qdnlk") pod "889b7838-f7be-4969-a167-9ff1b6ce04ef" (UID: "889b7838-f7be-4969-a167-9ff1b6ce04ef"). InnerVolumeSpecName "kube-api-access-qdnlk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.555649 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "889b7838-f7be-4969-a167-9ff1b6ce04ef" (UID: "889b7838-f7be-4969-a167-9ff1b6ce04ef"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.559119 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-scripts" (OuterVolumeSpecName: "scripts") pod "889b7838-f7be-4969-a167-9ff1b6ce04ef" (UID: "889b7838-f7be-4969-a167-9ff1b6ce04ef"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.594492 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "889b7838-f7be-4969-a167-9ff1b6ce04ef" (UID: "889b7838-f7be-4969-a167-9ff1b6ce04ef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.600329 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-config-data" (OuterVolumeSpecName: "config-data") pod "889b7838-f7be-4969-a167-9ff1b6ce04ef" (UID: "889b7838-f7be-4969-a167-9ff1b6ce04ef"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.651796 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qdnlk\" (UniqueName: \"kubernetes.io/projected/889b7838-f7be-4969-a167-9ff1b6ce04ef-kube-api-access-qdnlk\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.651840 4793 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.651852 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.651861 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.651870 4793 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.651878 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889b7838-f7be-4969-a167-9ff1b6ce04ef-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.858300 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-988699dd4-wjjzw"] Jan 27 20:26:38 crc kubenswrapper[4793]: W0127 20:26:38.881700 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod98e27e40_e02e_41a8_8935_f29b264435a7.slice/crio-cba537027ba57e9ce19b66ce9d1e2cb27f60bb35942d78b643d33bdbc480e288 WatchSource:0}: Error finding container cba537027ba57e9ce19b66ce9d1e2cb27f60bb35942d78b643d33bdbc480e288: Status 404 returned error can't find the container with id cba537027ba57e9ce19b66ce9d1e2cb27f60bb35942d78b643d33bdbc480e288 Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.896760 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-dbdlw" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.903807 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dbdlw" event={"ID":"889b7838-f7be-4969-a167-9ff1b6ce04ef","Type":"ContainerDied","Data":"5ac37f7bbae0c63ff992700f41b621e2d99bbd2042fb615e4f4a2b0f4f0b86ee"} Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.903846 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ac37f7bbae0c63ff992700f41b621e2d99bbd2042fb615e4f4a2b0f4f0b86ee" Jan 27 20:26:38 crc kubenswrapper[4793]: I0127 20:26:38.904896 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.043996 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-74bfddb9f7-8qtb8"] Jan 27 20:26:39 crc kubenswrapper[4793]: E0127 20:26:39.044394 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="889b7838-f7be-4969-a167-9ff1b6ce04ef" containerName="keystone-bootstrap" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.044410 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="889b7838-f7be-4969-a167-9ff1b6ce04ef" containerName="keystone-bootstrap" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.044811 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="889b7838-f7be-4969-a167-9ff1b6ce04ef" containerName="keystone-bootstrap" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.045391 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.049256 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.049511 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.072052 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.072271 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.072573 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.072741 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-5strr" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.072825 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.079176 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-74bfddb9f7-8qtb8"] Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.079935 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.180767 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctkdf\" (UniqueName: \"kubernetes.io/projected/6ed6109b-d066-45e9-81e5-7d7a42c55b77-kube-api-access-ctkdf\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: 
\"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.181115 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-fernet-keys\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.181254 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-internal-tls-certs\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.181347 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-public-tls-certs\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.181491 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-credential-keys\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.181593 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-config-data\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.181699 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-combined-ca-bundle\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.181767 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-scripts\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.200077 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.283074 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-combined-ca-bundle\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.283159 4793 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-scripts\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.283230 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctkdf\" (UniqueName: \"kubernetes.io/projected/6ed6109b-d066-45e9-81e5-7d7a42c55b77-kube-api-access-ctkdf\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.283299 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-fernet-keys\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.283317 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-internal-tls-certs\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.283357 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-public-tls-certs\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.283431 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-credential-keys\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.283466 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-config-data\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.294718 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.295255 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.295961 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-config-data\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.299002 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-scripts\") pod 
\"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.299107 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-internal-tls-certs\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.299872 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-combined-ca-bundle\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.308071 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-credential-keys\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.308285 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-fernet-keys\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.317762 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ed6109b-d066-45e9-81e5-7d7a42c55b77-public-tls-certs\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.318016 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctkdf\" (UniqueName: \"kubernetes.io/projected/6ed6109b-d066-45e9-81e5-7d7a42c55b77-kube-api-access-ctkdf\") pod \"keystone-74bfddb9f7-8qtb8\" (UID: \"6ed6109b-d066-45e9-81e5-7d7a42c55b77\") " pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.468143 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.646655 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-gds2t" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.813847 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dtqcs\" (UniqueName: \"kubernetes.io/projected/03131418-ea5d-47bd-906c-8a93c2712b1c-kube-api-access-dtqcs\") pod \"03131418-ea5d-47bd-906c-8a93c2712b1c\" (UID: \"03131418-ea5d-47bd-906c-8a93c2712b1c\") " Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.814164 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-config-data\") pod \"03131418-ea5d-47bd-906c-8a93c2712b1c\" (UID: \"03131418-ea5d-47bd-906c-8a93c2712b1c\") " Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.814186 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-combined-ca-bundle\") pod \"03131418-ea5d-47bd-906c-8a93c2712b1c\" (UID: \"03131418-ea5d-47bd-906c-8a93c2712b1c\") " Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.814215 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-db-sync-config-data\") pod \"03131418-ea5d-47bd-906c-8a93c2712b1c\" (UID: \"03131418-ea5d-47bd-906c-8a93c2712b1c\") " Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.825138 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "03131418-ea5d-47bd-906c-8a93c2712b1c" (UID: "03131418-ea5d-47bd-906c-8a93c2712b1c"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.831330 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03131418-ea5d-47bd-906c-8a93c2712b1c-kube-api-access-dtqcs" (OuterVolumeSpecName: "kube-api-access-dtqcs") pod "03131418-ea5d-47bd-906c-8a93c2712b1c" (UID: "03131418-ea5d-47bd-906c-8a93c2712b1c"). InnerVolumeSpecName "kube-api-access-dtqcs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.907192 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "03131418-ea5d-47bd-906c-8a93c2712b1c" (UID: "03131418-ea5d-47bd-906c-8a93c2712b1c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.931028 4793 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.931062 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dtqcs\" (UniqueName: \"kubernetes.io/projected/03131418-ea5d-47bd-906c-8a93c2712b1c-kube-api-access-dtqcs\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.931084 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.961011 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-config-data" (OuterVolumeSpecName: "config-data") pod "03131418-ea5d-47bd-906c-8a93c2712b1c" (UID: "03131418-ea5d-47bd-906c-8a93c2712b1c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:39 crc kubenswrapper[4793]: I0127 20:26:39.967861 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-gds2t" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.011720 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-988699dd4-wjjzw" event={"ID":"98e27e40-e02e-41a8-8935-f29b264435a7","Type":"ContainerStarted","Data":"c572c1dff2892983d31f298dc6ba4eae8f18465c177cf896882d52afa8adbc9e"} Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.011754 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-988699dd4-wjjzw" event={"ID":"98e27e40-e02e-41a8-8935-f29b264435a7","Type":"ContainerStarted","Data":"cba537027ba57e9ce19b66ce9d1e2cb27f60bb35942d78b643d33bdbc480e288"} Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.011766 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-gds2t" event={"ID":"03131418-ea5d-47bd-906c-8a93c2712b1c","Type":"ContainerDied","Data":"001d00b05aefcfd618f032911084bf2d9b99def427acc0f5f476bb2efafe651d"} Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.011779 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="001d00b05aefcfd618f032911084bf2d9b99def427acc0f5f476bb2efafe651d" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.032494 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03131418-ea5d-47bd-906c-8a93c2712b1c-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.385559 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-74bfddb9f7-8qtb8"] Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.602073 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-69d66b6dcf-bd258"] Jan 27 20:26:40 crc kubenswrapper[4793]: E0127 20:26:40.602586 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03131418-ea5d-47bd-906c-8a93c2712b1c" containerName="glance-db-sync" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.602604 4793 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="03131418-ea5d-47bd-906c-8a93c2712b1c" containerName="glance-db-sync" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.602864 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="03131418-ea5d-47bd-906c-8a93c2712b1c" containerName="glance-db-sync" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.603936 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.626897 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69d66b6dcf-bd258"] Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.683014 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-config\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.684919 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnc5x\" (UniqueName: \"kubernetes.io/projected/3ffb9b65-beb8-4455-92cf-6f275b4f4946-kube-api-access-mnc5x\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.685040 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-dns-svc\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.685239 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-ovsdbserver-nb\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.685420 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-ovsdbserver-sb\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.685582 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-dns-swift-storage-0\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.788481 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-ovsdbserver-sb\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.788798 4793 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-dns-swift-storage-0\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.789013 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-config\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.789101 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnc5x\" (UniqueName: \"kubernetes.io/projected/3ffb9b65-beb8-4455-92cf-6f275b4f4946-kube-api-access-mnc5x\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.789181 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-dns-svc\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.789280 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-ovsdbserver-nb\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.789382 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-ovsdbserver-sb\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.789516 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-dns-swift-storage-0\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.790127 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-ovsdbserver-nb\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.790151 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-dns-svc\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.790179 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-config\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.816700 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnc5x\" (UniqueName: \"kubernetes.io/projected/3ffb9b65-beb8-4455-92cf-6f275b4f4946-kube-api-access-mnc5x\") pod \"dnsmasq-dns-69d66b6dcf-bd258\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.929114 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:40 crc kubenswrapper[4793]: I0127 20:26:40.986133 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-74bfddb9f7-8qtb8" event={"ID":"6ed6109b-d066-45e9-81e5-7d7a42c55b77","Type":"ContainerStarted","Data":"24d51f8cf11efe31fa1b9f57868216483fb6093c219e75b94fadb883f5a03623"} Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.491210 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.493963 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.499159 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.499579 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.500258 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-frbrb" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.507147 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.563769 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69d66b6dcf-bd258"] Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.610710 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.611017 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a596596e-7206-49bb-92c8-9e8e551a8744-logs\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.611050 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-config-data\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.611073 4793 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvlc5\" (UniqueName: \"kubernetes.io/projected/a596596e-7206-49bb-92c8-9e8e551a8744-kube-api-access-bvlc5\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.611092 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.611116 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a596596e-7206-49bb-92c8-9e8e551a8744-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.611196 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-scripts\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.713223 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-scripts\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.713326 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.713357 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a596596e-7206-49bb-92c8-9e8e551a8744-logs\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.713386 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-config-data\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.713408 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvlc5\" (UniqueName: \"kubernetes.io/projected/a596596e-7206-49bb-92c8-9e8e551a8744-kube-api-access-bvlc5\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.713425 4793 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.713447 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a596596e-7206-49bb-92c8-9e8e551a8744-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.714434 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.714754 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a596596e-7206-49bb-92c8-9e8e551a8744-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.715010 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a596596e-7206-49bb-92c8-9e8e551a8744-logs\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.726852 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.726894 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-scripts\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.734969 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-config-data\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.737516 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvlc5\" (UniqueName: \"kubernetes.io/projected/a596596e-7206-49bb-92c8-9e8e551a8744-kube-api-access-bvlc5\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.742015 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.850151 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.851839 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.852800 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.858505 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 27 20:26:41 crc kubenswrapper[4793]: I0127 20:26:41.885245 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.008754 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" event={"ID":"3ffb9b65-beb8-4455-92cf-6f275b4f4946","Type":"ContainerStarted","Data":"c87214058edf38413d6ff30f3bb86763f2e4142171604e1b22d3f264bbc834f2"} Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.028789 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svvx5\" (UniqueName: \"kubernetes.io/projected/1e371b4f-e558-4684-ac2b-952843dc8b34-kube-api-access-svvx5\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.028918 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.028968 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.029043 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.029080 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e371b4f-e558-4684-ac2b-952843dc8b34-logs\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.029133 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.029176 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1e371b4f-e558-4684-ac2b-952843dc8b34-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.134142 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1e371b4f-e558-4684-ac2b-952843dc8b34-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.134249 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svvx5\" (UniqueName: \"kubernetes.io/projected/1e371b4f-e558-4684-ac2b-952843dc8b34-kube-api-access-svvx5\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.134392 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.134452 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.134569 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.134612 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e371b4f-e558-4684-ac2b-952843dc8b34-logs\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.135087 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.135746 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/1e371b4f-e558-4684-ac2b-952843dc8b34-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.135964 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.136292 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e371b4f-e558-4684-ac2b-952843dc8b34-logs\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.140642 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.141424 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.142575 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.156571 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svvx5\" (UniqueName: \"kubernetes.io/projected/1e371b4f-e558-4684-ac2b-952843dc8b34-kube-api-access-svvx5\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.181002 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.369564 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.459755 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 20:26:42 crc kubenswrapper[4793]: W0127 20:26:42.460558 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda596596e_7206_49bb_92c8_9e8e551a8744.slice/crio-a5bf3bf9a34dc5d4756ae531dd42557b855f11e3db4d0617d46d2334a201c100 WatchSource:0}: Error finding container a5bf3bf9a34dc5d4756ae531dd42557b855f11e3db4d0617d46d2334a201c100: Status 404 returned error can't find the container with id a5bf3bf9a34dc5d4756ae531dd42557b855f11e3db4d0617d46d2334a201c100 Jan 27 20:26:42 crc kubenswrapper[4793]: I0127 20:26:42.936938 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 20:26:42 crc kubenswrapper[4793]: W0127 20:26:42.949227 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e371b4f_e558_4684_ac2b_952843dc8b34.slice/crio-f289840e41800807f9bd84b00ae3156a165c6503431d873fa7b7cd96f9dfbcc5 WatchSource:0}: Error finding container f289840e41800807f9bd84b00ae3156a165c6503431d873fa7b7cd96f9dfbcc5: Status 404 returned error can't find the container with id f289840e41800807f9bd84b00ae3156a165c6503431d873fa7b7cd96f9dfbcc5 Jan 27 20:26:43 crc kubenswrapper[4793]: I0127 20:26:43.026428 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a596596e-7206-49bb-92c8-9e8e551a8744","Type":"ContainerStarted","Data":"a5bf3bf9a34dc5d4756ae531dd42557b855f11e3db4d0617d46d2334a201c100"} Jan 27 20:26:43 crc kubenswrapper[4793]: I0127 20:26:43.032895 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1e371b4f-e558-4684-ac2b-952843dc8b34","Type":"ContainerStarted","Data":"f289840e41800807f9bd84b00ae3156a165c6503431d873fa7b7cd96f9dfbcc5"} Jan 27 20:26:43 crc kubenswrapper[4793]: I0127 20:26:43.864873 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Jan 27 20:26:43 crc kubenswrapper[4793]: I0127 20:26:43.865367 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="ec364c41-f8df-4a06-83b8-959161dedc13" containerName="watcher-api-log" containerID="cri-o://6dc02795c4c7637cdcc6137486ed51c90024c153925a04dbeb938f88b6d3f423" gracePeriod=30 Jan 27 20:26:43 crc kubenswrapper[4793]: I0127 20:26:43.865803 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="ec364c41-f8df-4a06-83b8-959161dedc13" containerName="watcher-api" containerID="cri-o://ece23fb8bf5c4e7f8fd90aeba198b768d12734e9b5aa28e6fa7396c92f0948ee" gracePeriod=30 Jan 27 20:26:44 crc kubenswrapper[4793]: I0127 20:26:44.157092 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" event={"ID":"3ffb9b65-beb8-4455-92cf-6f275b4f4946","Type":"ContainerStarted","Data":"c072cc6ae3de469d9fa68dd964737d4572baeb24ab7ae451767be04389804df1"} Jan 27 20:26:44 crc kubenswrapper[4793]: I0127 20:26:44.172216 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"a596596e-7206-49bb-92c8-9e8e551a8744","Type":"ContainerStarted","Data":"4865e9443aa9b33c3c8fb6601dc6d8a93eef5b490a5fc2e285aef597714ea3f4"} Jan 27 20:26:44 crc kubenswrapper[4793]: I0127 20:26:44.173999 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-74bfddb9f7-8qtb8" event={"ID":"6ed6109b-d066-45e9-81e5-7d7a42c55b77","Type":"ContainerStarted","Data":"a32a50feb075e259f11d7b3e29db256bec57f8b6162590de40136e30da8613a0"} Jan 27 20:26:44 crc kubenswrapper[4793]: I0127 20:26:44.175175 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:26:44 crc kubenswrapper[4793]: I0127 20:26:44.177053 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1e371b4f-e558-4684-ac2b-952843dc8b34","Type":"ContainerStarted","Data":"70db2d42a7872f4c4f6ef88592a60344afdbdbe1d434ffb74a638b91a8d902e0"} Jan 27 20:26:44 crc kubenswrapper[4793]: I0127 20:26:44.185805 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-57fc549f96-h7nth" event={"ID":"598878f3-c1fc-481f-ad69-dacba44a1ccc","Type":"ContainerStarted","Data":"237306071e1f2df12ff0d8aa223b26e3719a7f1c4a0c8ca588b38b720fa1241f"} Jan 27 20:26:44 crc kubenswrapper[4793]: I0127 20:26:44.209567 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-988699dd4-wjjzw" event={"ID":"98e27e40-e02e-41a8-8935-f29b264435a7","Type":"ContainerStarted","Data":"8c728cf6e02b50f61dcb98c7e5c16ebd6a808be3b850d905c3bcf8a3a2e5ba2c"} Jan 27 20:26:44 crc kubenswrapper[4793]: I0127 20:26:44.210780 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:44 crc kubenswrapper[4793]: I0127 20:26:44.210806 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:44 crc kubenswrapper[4793]: I0127 20:26:44.217695 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-74bfddb9f7-8qtb8" podStartSLOduration=6.217676228 podStartE2EDuration="6.217676228s" podCreationTimestamp="2026-01-27 20:26:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:26:44.216294763 +0000 UTC m=+1429.606547919" watchObservedRunningTime="2026-01-27 20:26:44.217676228 +0000 UTC m=+1429.607929384" Jan 27 20:26:44 crc kubenswrapper[4793]: I0127 20:26:44.255325 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-988699dd4-wjjzw" podStartSLOduration=7.255299306 podStartE2EDuration="7.255299306s" podCreationTimestamp="2026-01-27 20:26:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:26:44.246989357 +0000 UTC m=+1429.637242523" watchObservedRunningTime="2026-01-27 20:26:44.255299306 +0000 UTC m=+1429.645552462" Jan 27 20:26:44 crc kubenswrapper[4793]: I0127 20:26:44.278891 4793 generic.go:334] "Generic (PLEG): container finished" podID="ec364c41-f8df-4a06-83b8-959161dedc13" containerID="6dc02795c4c7637cdcc6137486ed51c90024c153925a04dbeb938f88b6d3f423" exitCode=143 Jan 27 20:26:44 crc kubenswrapper[4793]: I0127 20:26:44.278966 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" 
event={"ID":"ec364c41-f8df-4a06-83b8-959161dedc13","Type":"ContainerDied","Data":"6dc02795c4c7637cdcc6137486ed51c90024c153925a04dbeb938f88b6d3f423"} Jan 27 20:26:45 crc kubenswrapper[4793]: I0127 20:26:45.291284 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a596596e-7206-49bb-92c8-9e8e551a8744","Type":"ContainerStarted","Data":"0928be1603af04b0cb28ebca5d610ddd94eea4015809b5bbc30825ebd298aa32"} Jan 27 20:26:45 crc kubenswrapper[4793]: I0127 20:26:45.303981 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="5c33cdbef124884a6a9d4c47d9f48fb1775d0ce10f32393960f0040865c191ca" exitCode=1 Jan 27 20:26:45 crc kubenswrapper[4793]: I0127 20:26:45.304054 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"5c33cdbef124884a6a9d4c47d9f48fb1775d0ce10f32393960f0040865c191ca"} Jan 27 20:26:45 crc kubenswrapper[4793]: I0127 20:26:45.304106 4793 scope.go:117] "RemoveContainer" containerID="9ce5db71cb2575820736cbf159d7a38974ec67942cb1decabc18705459a26743" Jan 27 20:26:45 crc kubenswrapper[4793]: I0127 20:26:45.304788 4793 scope.go:117] "RemoveContainer" containerID="5c33cdbef124884a6a9d4c47d9f48fb1775d0ce10f32393960f0040865c191ca" Jan 27 20:26:45 crc kubenswrapper[4793]: E0127 20:26:45.305072 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:26:45 crc kubenswrapper[4793]: I0127 20:26:45.311002 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1e371b4f-e558-4684-ac2b-952843dc8b34","Type":"ContainerStarted","Data":"26a607fc9ebf7dea653251515af49cbf1bf3dfb9c0aeab8200d04e5c59a27e28"} Jan 27 20:26:45 crc kubenswrapper[4793]: I0127 20:26:45.323518 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-57fc549f96-h7nth" event={"ID":"598878f3-c1fc-481f-ad69-dacba44a1ccc","Type":"ContainerStarted","Data":"186611f0a6b205ac9b5d6e692def864b6a8fad0c9b7ce18d1d02c7446c2253e8"} Jan 27 20:26:45 crc kubenswrapper[4793]: I0127 20:26:45.331514 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.33148872 podStartE2EDuration="5.33148872s" podCreationTimestamp="2026-01-27 20:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:26:45.330391012 +0000 UTC m=+1430.720644178" watchObservedRunningTime="2026-01-27 20:26:45.33148872 +0000 UTC m=+1430.721741876" Jan 27 20:26:45 crc kubenswrapper[4793]: I0127 20:26:45.335019 4793 generic.go:334] "Generic (PLEG): container finished" podID="ec364c41-f8df-4a06-83b8-959161dedc13" containerID="ece23fb8bf5c4e7f8fd90aeba198b768d12734e9b5aa28e6fa7396c92f0948ee" exitCode=0 Jan 27 20:26:45 crc kubenswrapper[4793]: I0127 20:26:45.335106 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"ec364c41-f8df-4a06-83b8-959161dedc13","Type":"ContainerDied","Data":"ece23fb8bf5c4e7f8fd90aeba198b768d12734e9b5aa28e6fa7396c92f0948ee"} Jan 27 
20:26:45 crc kubenswrapper[4793]: I0127 20:26:45.355626 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-57fc549f96-h7nth" podStartSLOduration=-9223371979.499168 podStartE2EDuration="57.355606728s" podCreationTimestamp="2026-01-27 20:25:48 +0000 UTC" firstStartedPulling="2026-01-27 20:25:56.848811101 +0000 UTC m=+1382.239064257" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:26:45.35329928 +0000 UTC m=+1430.743552456" watchObservedRunningTime="2026-01-27 20:26:45.355606728 +0000 UTC m=+1430.745859884" Jan 27 20:26:45 crc kubenswrapper[4793]: I0127 20:26:45.359090 4793 generic.go:334] "Generic (PLEG): container finished" podID="3ffb9b65-beb8-4455-92cf-6f275b4f4946" containerID="c072cc6ae3de469d9fa68dd964737d4572baeb24ab7ae451767be04389804df1" exitCode=0 Jan 27 20:26:45 crc kubenswrapper[4793]: I0127 20:26:45.359334 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" event={"ID":"3ffb9b65-beb8-4455-92cf-6f275b4f4946","Type":"ContainerDied","Data":"c072cc6ae3de469d9fa68dd964737d4572baeb24ab7ae451767be04389804df1"} Jan 27 20:26:45 crc kubenswrapper[4793]: I0127 20:26:45.384347 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.384327661 podStartE2EDuration="5.384327661s" podCreationTimestamp="2026-01-27 20:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:26:45.38229894 +0000 UTC m=+1430.772552096" watchObservedRunningTime="2026-01-27 20:26:45.384327661 +0000 UTC m=+1430.774580817" Jan 27 20:26:45 crc kubenswrapper[4793]: I0127 20:26:45.961353 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 20:26:46 crc kubenswrapper[4793]: I0127 20:26:46.029182 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 20:26:46 crc kubenswrapper[4793]: I0127 20:26:46.106240 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:26:46 crc kubenswrapper[4793]: I0127 20:26:46.373763 4793 generic.go:334] "Generic (PLEG): container finished" podID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerID="b333c04d88dda9d3fc2438453ff950914a7e834343103e2beb967bc73e514926" exitCode=1 Jan 27 20:26:46 crc kubenswrapper[4793]: I0127 20:26:46.373881 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"ffc46614-5f6d-40ad-a388-1ff326d22ee6","Type":"ContainerDied","Data":"b333c04d88dda9d3fc2438453ff950914a7e834343103e2beb967bc73e514926"} Jan 27 20:26:46 crc kubenswrapper[4793]: I0127 20:26:46.376642 4793 scope.go:117] "RemoveContainer" containerID="b333c04d88dda9d3fc2438453ff950914a7e834343103e2beb967bc73e514926" Jan 27 20:26:46 crc kubenswrapper[4793]: E0127 20:26:46.376973 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(ffc46614-5f6d-40ad-a388-1ff326d22ee6)\"" pod="openstack/watcher-decision-engine-0" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" Jan 27 20:26:47 crc kubenswrapper[4793]: I0127 20:26:47.382997 4793 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openstack/glance-default-external-api-0" podUID="a596596e-7206-49bb-92c8-9e8e551a8744" containerName="glance-log" containerID="cri-o://4865e9443aa9b33c3c8fb6601dc6d8a93eef5b490a5fc2e285aef597714ea3f4" gracePeriod=30 Jan 27 20:26:47 crc kubenswrapper[4793]: I0127 20:26:47.384215 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="1e371b4f-e558-4684-ac2b-952843dc8b34" containerName="glance-log" containerID="cri-o://70db2d42a7872f4c4f6ef88592a60344afdbdbe1d434ffb74a638b91a8d902e0" gracePeriod=30 Jan 27 20:26:47 crc kubenswrapper[4793]: I0127 20:26:47.384852 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a596596e-7206-49bb-92c8-9e8e551a8744" containerName="glance-httpd" containerID="cri-o://0928be1603af04b0cb28ebca5d610ddd94eea4015809b5bbc30825ebd298aa32" gracePeriod=30 Jan 27 20:26:47 crc kubenswrapper[4793]: I0127 20:26:47.385167 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="1e371b4f-e558-4684-ac2b-952843dc8b34" containerName="glance-httpd" containerID="cri-o://26a607fc9ebf7dea653251515af49cbf1bf3dfb9c0aeab8200d04e5c59a27e28" gracePeriod=30 Jan 27 20:26:48 crc kubenswrapper[4793]: I0127 20:26:48.056276 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 27 20:26:48 crc kubenswrapper[4793]: I0127 20:26:48.057206 4793 scope.go:117] "RemoveContainer" containerID="b333c04d88dda9d3fc2438453ff950914a7e834343103e2beb967bc73e514926" Jan 27 20:26:48 crc kubenswrapper[4793]: E0127 20:26:48.057521 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(ffc46614-5f6d-40ad-a388-1ff326d22ee6)\"" pod="openstack/watcher-decision-engine-0" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" Jan 27 20:26:48 crc kubenswrapper[4793]: I0127 20:26:48.242863 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:26:48 crc kubenswrapper[4793]: I0127 20:26:48.242921 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:26:48 crc kubenswrapper[4793]: I0127 20:26:48.242939 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 20:26:48 crc kubenswrapper[4793]: I0127 20:26:48.243829 4793 scope.go:117] "RemoveContainer" containerID="5c33cdbef124884a6a9d4c47d9f48fb1775d0ce10f32393960f0040865c191ca" Jan 27 20:26:48 crc kubenswrapper[4793]: E0127 20:26:48.244161 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:26:48 crc kubenswrapper[4793]: I0127 20:26:48.394415 4793 generic.go:334] "Generic (PLEG): container finished" podID="a596596e-7206-49bb-92c8-9e8e551a8744" containerID="0928be1603af04b0cb28ebca5d610ddd94eea4015809b5bbc30825ebd298aa32" exitCode=0 Jan 27 20:26:48 crc kubenswrapper[4793]: I0127 20:26:48.394454 4793 
generic.go:334] "Generic (PLEG): container finished" podID="a596596e-7206-49bb-92c8-9e8e551a8744" containerID="4865e9443aa9b33c3c8fb6601dc6d8a93eef5b490a5fc2e285aef597714ea3f4" exitCode=143 Jan 27 20:26:48 crc kubenswrapper[4793]: I0127 20:26:48.394482 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a596596e-7206-49bb-92c8-9e8e551a8744","Type":"ContainerDied","Data":"0928be1603af04b0cb28ebca5d610ddd94eea4015809b5bbc30825ebd298aa32"} Jan 27 20:26:48 crc kubenswrapper[4793]: I0127 20:26:48.394521 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a596596e-7206-49bb-92c8-9e8e551a8744","Type":"ContainerDied","Data":"4865e9443aa9b33c3c8fb6601dc6d8a93eef5b490a5fc2e285aef597714ea3f4"} Jan 27 20:26:48 crc kubenswrapper[4793]: I0127 20:26:48.396814 4793 generic.go:334] "Generic (PLEG): container finished" podID="1e371b4f-e558-4684-ac2b-952843dc8b34" containerID="26a607fc9ebf7dea653251515af49cbf1bf3dfb9c0aeab8200d04e5c59a27e28" exitCode=0 Jan 27 20:26:48 crc kubenswrapper[4793]: I0127 20:26:48.396836 4793 generic.go:334] "Generic (PLEG): container finished" podID="1e371b4f-e558-4684-ac2b-952843dc8b34" containerID="70db2d42a7872f4c4f6ef88592a60344afdbdbe1d434ffb74a638b91a8d902e0" exitCode=143 Jan 27 20:26:48 crc kubenswrapper[4793]: I0127 20:26:48.396850 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1e371b4f-e558-4684-ac2b-952843dc8b34","Type":"ContainerDied","Data":"26a607fc9ebf7dea653251515af49cbf1bf3dfb9c0aeab8200d04e5c59a27e28"} Jan 27 20:26:48 crc kubenswrapper[4793]: I0127 20:26:48.396874 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1e371b4f-e558-4684-ac2b-952843dc8b34","Type":"ContainerDied","Data":"70db2d42a7872f4c4f6ef88592a60344afdbdbe1d434ffb74a638b91a8d902e0"} Jan 27 20:26:48 crc kubenswrapper[4793]: I0127 20:26:48.397720 4793 scope.go:117] "RemoveContainer" containerID="5c33cdbef124884a6a9d4c47d9f48fb1775d0ce10f32393960f0040865c191ca" Jan 27 20:26:48 crc kubenswrapper[4793]: E0127 20:26:48.398026 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:26:49 crc kubenswrapper[4793]: I0127 20:26:49.699486 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-57fc549f96-h7nth" Jan 27 20:26:49 crc kubenswrapper[4793]: I0127 20:26:49.699912 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-57fc549f96-h7nth" Jan 27 20:26:51 crc kubenswrapper[4793]: I0127 20:26:51.364576 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:26:51 crc kubenswrapper[4793]: I0127 20:26:51.708002 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Jan 27 20:26:51 crc kubenswrapper[4793]: I0127 20:26:51.876686 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bjgkw\" (UniqueName: \"kubernetes.io/projected/ec364c41-f8df-4a06-83b8-959161dedc13-kube-api-access-bjgkw\") pod \"ec364c41-f8df-4a06-83b8-959161dedc13\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " Jan 27 20:26:51 crc kubenswrapper[4793]: I0127 20:26:51.877392 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec364c41-f8df-4a06-83b8-959161dedc13-logs\") pod \"ec364c41-f8df-4a06-83b8-959161dedc13\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " Jan 27 20:26:51 crc kubenswrapper[4793]: I0127 20:26:51.877443 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-config-data\") pod \"ec364c41-f8df-4a06-83b8-959161dedc13\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " Jan 27 20:26:51 crc kubenswrapper[4793]: I0127 20:26:51.877567 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-custom-prometheus-ca\") pod \"ec364c41-f8df-4a06-83b8-959161dedc13\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " Jan 27 20:26:51 crc kubenswrapper[4793]: I0127 20:26:51.877663 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-combined-ca-bundle\") pod \"ec364c41-f8df-4a06-83b8-959161dedc13\" (UID: \"ec364c41-f8df-4a06-83b8-959161dedc13\") " Jan 27 20:26:51 crc kubenswrapper[4793]: I0127 20:26:51.878022 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec364c41-f8df-4a06-83b8-959161dedc13-logs" (OuterVolumeSpecName: "logs") pod "ec364c41-f8df-4a06-83b8-959161dedc13" (UID: "ec364c41-f8df-4a06-83b8-959161dedc13"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:26:51 crc kubenswrapper[4793]: I0127 20:26:51.887842 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec364c41-f8df-4a06-83b8-959161dedc13-kube-api-access-bjgkw" (OuterVolumeSpecName: "kube-api-access-bjgkw") pod "ec364c41-f8df-4a06-83b8-959161dedc13" (UID: "ec364c41-f8df-4a06-83b8-959161dedc13"). InnerVolumeSpecName "kube-api-access-bjgkw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:26:51 crc kubenswrapper[4793]: I0127 20:26:51.953641 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ec364c41-f8df-4a06-83b8-959161dedc13" (UID: "ec364c41-f8df-4a06-83b8-959161dedc13"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:51 crc kubenswrapper[4793]: I0127 20:26:51.971082 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "ec364c41-f8df-4a06-83b8-959161dedc13" (UID: "ec364c41-f8df-4a06-83b8-959161dedc13"). InnerVolumeSpecName "custom-prometheus-ca". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:51 crc kubenswrapper[4793]: I0127 20:26:51.979261 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec364c41-f8df-4a06-83b8-959161dedc13-logs\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:51 crc kubenswrapper[4793]: I0127 20:26:51.979285 4793 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:51 crc kubenswrapper[4793]: I0127 20:26:51.979294 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:51 crc kubenswrapper[4793]: I0127 20:26:51.979303 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bjgkw\" (UniqueName: \"kubernetes.io/projected/ec364c41-f8df-4a06-83b8-959161dedc13-kube-api-access-bjgkw\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:51 crc kubenswrapper[4793]: I0127 20:26:51.981050 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-config-data" (OuterVolumeSpecName: "config-data") pod "ec364c41-f8df-4a06-83b8-959161dedc13" (UID: "ec364c41-f8df-4a06-83b8-959161dedc13"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.090997 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec364c41-f8df-4a06-83b8-959161dedc13-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.435842 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"ec364c41-f8df-4a06-83b8-959161dedc13","Type":"ContainerDied","Data":"d7410c2b162cde2994c27f4e4ab2a8d0cb0c154ade5e160b3fcf200de589fdc2"} Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.435968 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.477509 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.495185 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-api-0"] Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.532140 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Jan 27 20:26:52 crc kubenswrapper[4793]: E0127 20:26:52.532635 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec364c41-f8df-4a06-83b8-959161dedc13" containerName="watcher-api" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.532659 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec364c41-f8df-4a06-83b8-959161dedc13" containerName="watcher-api" Jan 27 20:26:52 crc kubenswrapper[4793]: E0127 20:26:52.532699 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec364c41-f8df-4a06-83b8-959161dedc13" containerName="watcher-api-log" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.532709 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec364c41-f8df-4a06-83b8-959161dedc13" containerName="watcher-api-log" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.532912 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec364c41-f8df-4a06-83b8-959161dedc13" containerName="watcher-api" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.532982 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec364c41-f8df-4a06-83b8-959161dedc13" containerName="watcher-api-log" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.534193 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.538755 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.539229 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-public-svc" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.539271 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-internal-svc" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.547035 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.702106 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-public-tls-certs\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.702164 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.702211 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-config-data\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.702232 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-logs\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.702340 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5v47\" (UniqueName: \"kubernetes.io/projected/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-kube-api-access-m5v47\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.702385 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.702417 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.753896 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.753949 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.804361 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.804466 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-public-tls-certs\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.804491 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.804520 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-config-data\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.804539 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-logs\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.804737 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5v47\" (UniqueName: \"kubernetes.io/projected/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-kube-api-access-m5v47\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.805018 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.805320 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-logs\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.808882 4793 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.808953 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.809225 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-public-tls-certs\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.809841 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-config-data\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.810199 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.822458 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5v47\" (UniqueName: \"kubernetes.io/projected/6609a1f3-1bd1-4179-99fb-2dc0f32df09d-kube-api-access-m5v47\") pod \"watcher-api-0\" (UID: \"6609a1f3-1bd1-4179-99fb-2dc0f32df09d\") " pod="openstack/watcher-api-0" Jan 27 20:26:52 crc kubenswrapper[4793]: I0127 20:26:52.900861 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Jan 27 20:26:53 crc kubenswrapper[4793]: I0127 20:26:53.160327 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="ec364c41-f8df-4a06-83b8-959161dedc13" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:26:53 crc kubenswrapper[4793]: I0127 20:26:53.160345 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="ec364c41-f8df-4a06-83b8-959161dedc13" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.162:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:26:53 crc kubenswrapper[4793]: I0127 20:26:53.647972 4793 scope.go:117] "RemoveContainer" containerID="e161bd49e2620c5e94ec1676c61e7c7d3c5c15b3f5a6d064f7ef87e6ed797451" Jan 27 20:26:53 crc kubenswrapper[4793]: I0127 20:26:53.681833 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:26:53 crc kubenswrapper[4793]: I0127 20:26:53.847676 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec364c41-f8df-4a06-83b8-959161dedc13" path="/var/lib/kubelet/pods/ec364c41-f8df-4a06-83b8-959161dedc13/volumes" Jan 27 20:26:53 crc kubenswrapper[4793]: I0127 20:26:53.878695 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 20:26:53 crc kubenswrapper[4793]: I0127 20:26:53.907243 4793 scope.go:117] "RemoveContainer" containerID="ece23fb8bf5c4e7f8fd90aeba198b768d12734e9b5aa28e6fa7396c92f0948ee" Jan 27 20:26:53 crc kubenswrapper[4793]: I0127 20:26:53.908149 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 27 20:26:53 crc kubenswrapper[4793]: I0127 20:26:53.999787 4793 scope.go:117] "RemoveContainer" containerID="6dc02795c4c7637cdcc6137486ed51c90024c153925a04dbeb938f88b6d3f423" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.048797 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvlc5\" (UniqueName: \"kubernetes.io/projected/a596596e-7206-49bb-92c8-9e8e551a8744-kube-api-access-bvlc5\") pod \"a596596e-7206-49bb-92c8-9e8e551a8744\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.048890 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a596596e-7206-49bb-92c8-9e8e551a8744-logs\") pod \"a596596e-7206-49bb-92c8-9e8e551a8744\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.048937 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e371b4f-e558-4684-ac2b-952843dc8b34-logs\") pod \"1e371b4f-e558-4684-ac2b-952843dc8b34\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.048992 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"a596596e-7206-49bb-92c8-9e8e551a8744\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.049060 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-combined-ca-bundle\") pod \"a596596e-7206-49bb-92c8-9e8e551a8744\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.049104 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"1e371b4f-e558-4684-ac2b-952843dc8b34\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.049137 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-config-data\") pod \"1e371b4f-e558-4684-ac2b-952843dc8b34\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.049167 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-combined-ca-bundle\") pod \"1e371b4f-e558-4684-ac2b-952843dc8b34\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.049230 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-scripts\") pod \"1e371b4f-e558-4684-ac2b-952843dc8b34\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.049276 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-svvx5\" (UniqueName: 
\"kubernetes.io/projected/1e371b4f-e558-4684-ac2b-952843dc8b34-kube-api-access-svvx5\") pod \"1e371b4f-e558-4684-ac2b-952843dc8b34\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.049305 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a596596e-7206-49bb-92c8-9e8e551a8744-httpd-run\") pod \"a596596e-7206-49bb-92c8-9e8e551a8744\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.049327 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-config-data\") pod \"a596596e-7206-49bb-92c8-9e8e551a8744\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.049370 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-scripts\") pod \"a596596e-7206-49bb-92c8-9e8e551a8744\" (UID: \"a596596e-7206-49bb-92c8-9e8e551a8744\") " Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.049390 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a596596e-7206-49bb-92c8-9e8e551a8744-logs" (OuterVolumeSpecName: "logs") pod "a596596e-7206-49bb-92c8-9e8e551a8744" (UID: "a596596e-7206-49bb-92c8-9e8e551a8744"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.049404 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1e371b4f-e558-4684-ac2b-952843dc8b34-httpd-run\") pod \"1e371b4f-e558-4684-ac2b-952843dc8b34\" (UID: \"1e371b4f-e558-4684-ac2b-952843dc8b34\") " Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.049634 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e371b4f-e558-4684-ac2b-952843dc8b34-logs" (OuterVolumeSpecName: "logs") pod "1e371b4f-e558-4684-ac2b-952843dc8b34" (UID: "1e371b4f-e558-4684-ac2b-952843dc8b34"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.049689 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e371b4f-e558-4684-ac2b-952843dc8b34-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "1e371b4f-e558-4684-ac2b-952843dc8b34" (UID: "1e371b4f-e558-4684-ac2b-952843dc8b34"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.049923 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a596596e-7206-49bb-92c8-9e8e551a8744-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a596596e-7206-49bb-92c8-9e8e551a8744" (UID: "a596596e-7206-49bb-92c8-9e8e551a8744"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.050885 4793 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a596596e-7206-49bb-92c8-9e8e551a8744-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.050909 4793 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1e371b4f-e558-4684-ac2b-952843dc8b34-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.050923 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a596596e-7206-49bb-92c8-9e8e551a8744-logs\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.050934 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e371b4f-e558-4684-ac2b-952843dc8b34-logs\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.059748 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-scripts" (OuterVolumeSpecName: "scripts") pod "a596596e-7206-49bb-92c8-9e8e551a8744" (UID: "a596596e-7206-49bb-92c8-9e8e551a8744"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.059915 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-scripts" (OuterVolumeSpecName: "scripts") pod "1e371b4f-e558-4684-ac2b-952843dc8b34" (UID: "1e371b4f-e558-4684-ac2b-952843dc8b34"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.060714 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "1e371b4f-e558-4684-ac2b-952843dc8b34" (UID: "1e371b4f-e558-4684-ac2b-952843dc8b34"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.059358 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a596596e-7206-49bb-92c8-9e8e551a8744-kube-api-access-bvlc5" (OuterVolumeSpecName: "kube-api-access-bvlc5") pod "a596596e-7206-49bb-92c8-9e8e551a8744" (UID: "a596596e-7206-49bb-92c8-9e8e551a8744"). InnerVolumeSpecName "kube-api-access-bvlc5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.062112 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "a596596e-7206-49bb-92c8-9e8e551a8744" (UID: "a596596e-7206-49bb-92c8-9e8e551a8744"). InnerVolumeSpecName "local-storage12-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 27 20:26:54 crc kubenswrapper[4793]: E0127 20:26:54.074346 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="aec1692e-4b57-4c95-863f-589e8f36e4a1" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.088285 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e371b4f-e558-4684-ac2b-952843dc8b34-kube-api-access-svvx5" (OuterVolumeSpecName: "kube-api-access-svvx5") pod "1e371b4f-e558-4684-ac2b-952843dc8b34" (UID: "1e371b4f-e558-4684-ac2b-952843dc8b34"). InnerVolumeSpecName "kube-api-access-svvx5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.088648 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a596596e-7206-49bb-92c8-9e8e551a8744" (UID: "a596596e-7206-49bb-92c8-9e8e551a8744"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.104651 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1e371b4f-e558-4684-ac2b-952843dc8b34" (UID: "1e371b4f-e558-4684-ac2b-952843dc8b34"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.115727 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-config-data" (OuterVolumeSpecName: "config-data") pod "a596596e-7206-49bb-92c8-9e8e551a8744" (UID: "a596596e-7206-49bb-92c8-9e8e551a8744"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.116182 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-config-data" (OuterVolumeSpecName: "config-data") pod "1e371b4f-e558-4684-ac2b-952843dc8b34" (UID: "1e371b4f-e558-4684-ac2b-952843dc8b34"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.154084 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-svvx5\" (UniqueName: \"kubernetes.io/projected/1e371b4f-e558-4684-ac2b-952843dc8b34-kube-api-access-svvx5\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.154139 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.154155 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.154167 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvlc5\" (UniqueName: \"kubernetes.io/projected/a596596e-7206-49bb-92c8-9e8e551a8744-kube-api-access-bvlc5\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.154328 4793 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.154345 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a596596e-7206-49bb-92c8-9e8e551a8744-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.154393 4793 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.154407 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.154421 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.154432 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e371b4f-e558-4684-ac2b-952843dc8b34-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.180785 4793 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.182727 4793 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.256071 4793 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.256110 4793 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.355432 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Jan 27 20:26:54 crc kubenswrapper[4793]: W0127 20:26:54.361290 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6609a1f3_1bd1_4179_99fb_2dc0f32df09d.slice/crio-de57d8c6fd660decc2d6d9e017a4088d718b2a1375d51123a4873069441b6ed2 WatchSource:0}: Error finding container de57d8c6fd660decc2d6d9e017a4088d718b2a1375d51123a4873069441b6ed2: Status 404 returned error can't find the container with id de57d8c6fd660decc2d6d9e017a4088d718b2a1375d51123a4873069441b6ed2 Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.455791 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"6609a1f3-1bd1-4179-99fb-2dc0f32df09d","Type":"ContainerStarted","Data":"de57d8c6fd660decc2d6d9e017a4088d718b2a1375d51123a4873069441b6ed2"} Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.458306 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a596596e-7206-49bb-92c8-9e8e551a8744","Type":"ContainerDied","Data":"a5bf3bf9a34dc5d4756ae531dd42557b855f11e3db4d0617d46d2334a201c100"} Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.458419 4793 scope.go:117] "RemoveContainer" containerID="0928be1603af04b0cb28ebca5d610ddd94eea4015809b5bbc30825ebd298aa32" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.460002 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.485401 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-wlfpp" event={"ID":"64b480ab-f615-4d0b-9b56-ef6d0acf8955","Type":"ContainerStarted","Data":"ac49f5f979165c4e7a819a2acfcda5cc4faa76cf76e1f1272e99b26b776b3b8b"} Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.492514 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" event={"ID":"3ffb9b65-beb8-4455-92cf-6f275b4f4946","Type":"ContainerStarted","Data":"fbd5473a6e6a4b3624f9a6b7f32d0b0a62d9a22bf641d31377cb499b7de0fa9c"} Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.492653 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.495431 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.496927 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.496926 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1e371b4f-e558-4684-ac2b-952843dc8b34","Type":"ContainerDied","Data":"f289840e41800807f9bd84b00ae3156a165c6503431d873fa7b7cd96f9dfbcc5"} Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.500724 4793 scope.go:117] "RemoveContainer" containerID="4865e9443aa9b33c3c8fb6601dc6d8a93eef5b490a5fc2e285aef597714ea3f4" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.506621 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.508791 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aec1692e-4b57-4c95-863f-589e8f36e4a1","Type":"ContainerStarted","Data":"9bff0f955bb0a8dfad28900c476d21c26397534e939a6b98377112c3e89c4e5a"} Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.509026 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="aec1692e-4b57-4c95-863f-589e8f36e4a1" containerName="ceilometer-notification-agent" containerID="cri-o://171f806f43a9c519891fb1a87d398d8a4ec7313a80c4dcaff2917449857ae3ca" gracePeriod=30 Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.509329 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.509394 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="aec1692e-4b57-4c95-863f-589e8f36e4a1" containerName="proxy-httpd" containerID="cri-o://9bff0f955bb0a8dfad28900c476d21c26397534e939a6b98377112c3e89c4e5a" gracePeriod=30 Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.509457 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="aec1692e-4b57-4c95-863f-589e8f36e4a1" containerName="sg-core" containerID="cri-o://ddc3ebb1921f3d6d9ae0e98f38b88a4d329efb259009ff3f475c17fe7d1fd149" gracePeriod=30 Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.531298 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 20:26:54 crc kubenswrapper[4793]: E0127 20:26:54.531714 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a596596e-7206-49bb-92c8-9e8e551a8744" containerName="glance-httpd" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.531730 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="a596596e-7206-49bb-92c8-9e8e551a8744" containerName="glance-httpd" Jan 27 20:26:54 crc kubenswrapper[4793]: E0127 20:26:54.531742 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e371b4f-e558-4684-ac2b-952843dc8b34" containerName="glance-log" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.531749 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e371b4f-e558-4684-ac2b-952843dc8b34" containerName="glance-log" Jan 27 20:26:54 crc kubenswrapper[4793]: E0127 20:26:54.531775 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e371b4f-e558-4684-ac2b-952843dc8b34" containerName="glance-httpd" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.531782 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e371b4f-e558-4684-ac2b-952843dc8b34" 
containerName="glance-httpd" Jan 27 20:26:54 crc kubenswrapper[4793]: E0127 20:26:54.531793 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a596596e-7206-49bb-92c8-9e8e551a8744" containerName="glance-log" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.531798 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="a596596e-7206-49bb-92c8-9e8e551a8744" containerName="glance-log" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.531991 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="a596596e-7206-49bb-92c8-9e8e551a8744" containerName="glance-log" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.532016 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e371b4f-e558-4684-ac2b-952843dc8b34" containerName="glance-log" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.532031 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="a596596e-7206-49bb-92c8-9e8e551a8744" containerName="glance-httpd" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.532043 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e371b4f-e558-4684-ac2b-952843dc8b34" containerName="glance-httpd" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.535043 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-wlfpp" podStartSLOduration=3.865143942 podStartE2EDuration="1m16.535024332s" podCreationTimestamp="2026-01-27 20:25:38 +0000 UTC" firstStartedPulling="2026-01-27 20:25:41.07810944 +0000 UTC m=+1366.468362596" lastFinishedPulling="2026-01-27 20:26:53.74798982 +0000 UTC m=+1439.138242986" observedRunningTime="2026-01-27 20:26:54.521934462 +0000 UTC m=+1439.912187618" watchObservedRunningTime="2026-01-27 20:26:54.535024332 +0000 UTC m=+1439.925277488" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.541485 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.544604 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.545125 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-frbrb" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.545322 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.561715 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.579654 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.589704 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" podStartSLOduration=14.589676488 podStartE2EDuration="14.589676488s" podCreationTimestamp="2026-01-27 20:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:26:54.580520268 +0000 UTC m=+1439.970773434" watchObservedRunningTime="2026-01-27 20:26:54.589676488 +0000 UTC m=+1439.979929644" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.609828 4793 scope.go:117] "RemoveContainer" containerID="26a607fc9ebf7dea653251515af49cbf1bf3dfb9c0aeab8200d04e5c59a27e28" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.652238 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.663306 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.663458 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.663587 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-292t5\" (UniqueName: \"kubernetes.io/projected/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-kube-api-access-292t5\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.663680 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.663735 4793 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.663809 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-config-data\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.663847 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-scripts\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.664020 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-logs\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.668049 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.681944 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.704408 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.713576 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.715225 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.741509 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.784957 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-scripts\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.785016 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-logs\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.785085 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.785159 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.785220 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-292t5\" (UniqueName: \"kubernetes.io/projected/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-kube-api-access-292t5\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.785280 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.785310 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.785341 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-config-data\") pod 
\"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.785864 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.788914 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-scripts\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.789256 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-logs\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.789823 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.792677 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-config-data\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.797167 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.800281 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.811077 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-292t5\" (UniqueName: \"kubernetes.io/projected/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-kube-api-access-292t5\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.814101 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " pod="openstack/glance-default-external-api-0" Jan 27 
20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.874950 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.887958 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c3db2fc-e883-483d-85fa-953061fd0f5a-logs\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.888005 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.888028 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.888086 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s975l\" (UniqueName: \"kubernetes.io/projected/6c3db2fc-e883-483d-85fa-953061fd0f5a-kube-api-access-s975l\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.888110 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.888164 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.888213 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.888293 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c3db2fc-e883-483d-85fa-953061fd0f5a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.908781 4793 scope.go:117] "RemoveContainer" 
containerID="70db2d42a7872f4c4f6ef88592a60344afdbdbe1d434ffb74a638b91a8d902e0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.990097 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c3db2fc-e883-483d-85fa-953061fd0f5a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.990414 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c3db2fc-e883-483d-85fa-953061fd0f5a-logs\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.990449 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.990482 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.990520 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s975l\" (UniqueName: \"kubernetes.io/projected/6c3db2fc-e883-483d-85fa-953061fd0f5a-kube-api-access-s975l\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.990596 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.990706 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.990811 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.991082 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c3db2fc-e883-483d-85fa-953061fd0f5a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc 
kubenswrapper[4793]: I0127 20:26:54.991813 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Jan 27 20:26:54 crc kubenswrapper[4793]: I0127 20:26:54.992666 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c3db2fc-e883-483d-85fa-953061fd0f5a-logs\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.005096 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.008791 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.015160 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.016493 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.022403 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s975l\" (UniqueName: \"kubernetes.io/projected/6c3db2fc-e883-483d-85fa-953061fd0f5a-kube-api-access-s975l\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.043566 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.273176 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.500226 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 20:26:55 crc kubenswrapper[4793]: W0127 20:26:55.519390 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8ca404d2_1fe1_4c87_b5b5_aacc11a0525e.slice/crio-7e1375b26b6e9a6a21f8174140f7c107d7630b0be02dce46950ecce13a9a60d0 WatchSource:0}: Error finding container 7e1375b26b6e9a6a21f8174140f7c107d7630b0be02dce46950ecce13a9a60d0: Status 404 returned error can't find the container with id 7e1375b26b6e9a6a21f8174140f7c107d7630b0be02dce46950ecce13a9a60d0 Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.541196 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"6609a1f3-1bd1-4179-99fb-2dc0f32df09d","Type":"ContainerStarted","Data":"23b8490ce4975547b4bdf2942a861d87089fea979fe07c64479f096b48fe7101"} Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.541280 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"6609a1f3-1bd1-4179-99fb-2dc0f32df09d","Type":"ContainerStarted","Data":"545f7a77b67fc5bb9eff120c8b7e6df505d087a45752a4f1bef80a4eebc7bc3e"} Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.542460 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.550882 4793 generic.go:334] "Generic (PLEG): container finished" podID="aec1692e-4b57-4c95-863f-589e8f36e4a1" containerID="9bff0f955bb0a8dfad28900c476d21c26397534e939a6b98377112c3e89c4e5a" exitCode=0 Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.550923 4793 generic.go:334] "Generic (PLEG): container finished" podID="aec1692e-4b57-4c95-863f-589e8f36e4a1" containerID="ddc3ebb1921f3d6d9ae0e98f38b88a4d329efb259009ff3f475c17fe7d1fd149" exitCode=2 Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.550971 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aec1692e-4b57-4c95-863f-589e8f36e4a1","Type":"ContainerDied","Data":"9bff0f955bb0a8dfad28900c476d21c26397534e939a6b98377112c3e89c4e5a"} Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.550999 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aec1692e-4b57-4c95-863f-589e8f36e4a1","Type":"ContainerDied","Data":"ddc3ebb1921f3d6d9ae0e98f38b88a4d329efb259009ff3f475c17fe7d1fd149"} Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.555199 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-cql8q" event={"ID":"a07ca8f7-3387-4f58-a094-26d491028752","Type":"ContainerStarted","Data":"32c84a319d62d23053311a2833bc78d2a80cea5c80329bf5e109babdcb818bb4"} Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.567949 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=3.567915386 podStartE2EDuration="3.567915386s" podCreationTimestamp="2026-01-27 20:26:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:26:55.562683315 +0000 UTC m=+1440.952936501" watchObservedRunningTime="2026-01-27 20:26:55.567915386 +0000 UTC m=+1440.958168542" Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 
20:26:55.614211 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-cql8q" podStartSLOduration=5.134402049 podStartE2EDuration="1m17.614186872s" podCreationTimestamp="2026-01-27 20:25:38 +0000 UTC" firstStartedPulling="2026-01-27 20:25:41.269364656 +0000 UTC m=+1366.659617812" lastFinishedPulling="2026-01-27 20:26:53.749149479 +0000 UTC m=+1439.139402635" observedRunningTime="2026-01-27 20:26:55.584133015 +0000 UTC m=+1440.974386171" watchObservedRunningTime="2026-01-27 20:26:55.614186872 +0000 UTC m=+1441.004440028" Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.837821 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e371b4f-e558-4684-ac2b-952843dc8b34" path="/var/lib/kubelet/pods/1e371b4f-e558-4684-ac2b-952843dc8b34/volumes" Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.874091 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a596596e-7206-49bb-92c8-9e8e551a8744" path="/var/lib/kubelet/pods/a596596e-7206-49bb-92c8-9e8e551a8744/volumes" Jan 27 20:26:55 crc kubenswrapper[4793]: I0127 20:26:55.888284 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 20:26:55 crc kubenswrapper[4793]: W0127 20:26:55.892194 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c3db2fc_e883_483d_85fa_953061fd0f5a.slice/crio-f90d358afc02ec79d00e0fabaf969358fe97776a36cd11ab36ab0c974742ac66 WatchSource:0}: Error finding container f90d358afc02ec79d00e0fabaf969358fe97776a36cd11ab36ab0c974742ac66: Status 404 returned error can't find the container with id f90d358afc02ec79d00e0fabaf969358fe97776a36cd11ab36ab0c974742ac66 Jan 27 20:26:56 crc kubenswrapper[4793]: I0127 20:26:56.572304 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e","Type":"ContainerStarted","Data":"ecf5044d9db26de30ffd161f4c9633a199156e9809cfb6028601faf12a564bde"} Jan 27 20:26:56 crc kubenswrapper[4793]: I0127 20:26:56.573357 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e","Type":"ContainerStarted","Data":"7e1375b26b6e9a6a21f8174140f7c107d7630b0be02dce46950ecce13a9a60d0"} Jan 27 20:26:56 crc kubenswrapper[4793]: I0127 20:26:56.576055 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c3db2fc-e883-483d-85fa-953061fd0f5a","Type":"ContainerStarted","Data":"f90d358afc02ec79d00e0fabaf969358fe97776a36cd11ab36ab0c974742ac66"} Jan 27 20:26:57 crc kubenswrapper[4793]: I0127 20:26:57.589475 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c3db2fc-e883-483d-85fa-953061fd0f5a","Type":"ContainerStarted","Data":"1f5e058677b35ac471f327db9afd2e39f6c681c28374b80600c4b632f4c744cb"} Jan 27 20:26:57 crc kubenswrapper[4793]: I0127 20:26:57.592906 4793 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 27 20:26:57 crc kubenswrapper[4793]: I0127 20:26:57.594513 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e","Type":"ContainerStarted","Data":"5fc2b23273ae646efdecc9b22a3cacc0c451c4b9af4bce6696c35d0d0a471e97"} Jan 27 20:26:57 crc kubenswrapper[4793]: I0127 
20:26:57.636926 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.636903406 podStartE2EDuration="3.636903406s" podCreationTimestamp="2026-01-27 20:26:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:26:57.627728025 +0000 UTC m=+1443.017981171" watchObservedRunningTime="2026-01-27 20:26:57.636903406 +0000 UTC m=+1443.027156562" Jan 27 20:26:57 crc kubenswrapper[4793]: I0127 20:26:57.901493 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Jan 27 20:26:58 crc kubenswrapper[4793]: I0127 20:26:58.056925 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 27 20:26:58 crc kubenswrapper[4793]: I0127 20:26:58.057034 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Jan 27 20:26:58 crc kubenswrapper[4793]: I0127 20:26:58.174758 4793 scope.go:117] "RemoveContainer" containerID="b333c04d88dda9d3fc2438453ff950914a7e834343103e2beb967bc73e514926" Jan 27 20:26:58 crc kubenswrapper[4793]: I0127 20:26:58.603262 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c3db2fc-e883-483d-85fa-953061fd0f5a","Type":"ContainerStarted","Data":"a4d64b64b9ed12660713d7b7e2bd1cba9c4ce5fe4df5f28ac3c4c100ed55171f"} Jan 27 20:26:58 crc kubenswrapper[4793]: I0127 20:26:58.605688 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"ffc46614-5f6d-40ad-a388-1ff326d22ee6","Type":"ContainerStarted","Data":"f7a8200b8c77c8e31ca3cc1869f57c5e8b46ff748a1f699d8e2f166aad3ad398"} Jan 27 20:26:58 crc kubenswrapper[4793]: I0127 20:26:58.606172 4793 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 27 20:26:58 crc kubenswrapper[4793]: I0127 20:26:58.630421 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.630374007 podStartE2EDuration="4.630374007s" podCreationTimestamp="2026-01-27 20:26:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:26:58.624961522 +0000 UTC m=+1444.015214678" watchObservedRunningTime="2026-01-27 20:26:58.630374007 +0000 UTC m=+1444.020627163" Jan 27 20:26:58 crc kubenswrapper[4793]: I0127 20:26:58.803035 4793 scope.go:117] "RemoveContainer" containerID="5c33cdbef124884a6a9d4c47d9f48fb1775d0ce10f32393960f0040865c191ca" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.269333 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.624100 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"3135f6d30b9f8d64a130dfb58628947f2f7650a5a0eba624b0c1e61e689b4144"} Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.626580 4793 generic.go:334] "Generic (PLEG): container finished" podID="aec1692e-4b57-4c95-863f-589e8f36e4a1" containerID="171f806f43a9c519891fb1a87d398d8a4ec7313a80c4dcaff2917449857ae3ca" exitCode=0 Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.627149 4793 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aec1692e-4b57-4c95-863f-589e8f36e4a1","Type":"ContainerDied","Data":"171f806f43a9c519891fb1a87d398d8a4ec7313a80c4dcaff2917449857ae3ca"} Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.627171 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aec1692e-4b57-4c95-863f-589e8f36e4a1","Type":"ContainerDied","Data":"41dbf848c4c3f99fd4dd08b025627e1339749b3664716eabb61e449cc5200f4d"} Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.627182 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="41dbf848c4c3f99fd4dd08b025627e1339749b3664716eabb61e449cc5200f4d" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.704791 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.825655 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-sg-core-conf-yaml\") pod \"aec1692e-4b57-4c95-863f-589e8f36e4a1\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.826084 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aec1692e-4b57-4c95-863f-589e8f36e4a1-log-httpd\") pod \"aec1692e-4b57-4c95-863f-589e8f36e4a1\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.826133 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wskfl\" (UniqueName: \"kubernetes.io/projected/aec1692e-4b57-4c95-863f-589e8f36e4a1-kube-api-access-wskfl\") pod \"aec1692e-4b57-4c95-863f-589e8f36e4a1\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.826177 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-scripts\") pod \"aec1692e-4b57-4c95-863f-589e8f36e4a1\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.826264 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-config-data\") pod \"aec1692e-4b57-4c95-863f-589e8f36e4a1\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.826417 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aec1692e-4b57-4c95-863f-589e8f36e4a1-run-httpd\") pod \"aec1692e-4b57-4c95-863f-589e8f36e4a1\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.826445 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-combined-ca-bundle\") pod \"aec1692e-4b57-4c95-863f-589e8f36e4a1\" (UID: \"aec1692e-4b57-4c95-863f-589e8f36e4a1\") " Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.827646 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/aec1692e-4b57-4c95-863f-589e8f36e4a1-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "aec1692e-4b57-4c95-863f-589e8f36e4a1" (UID: "aec1692e-4b57-4c95-863f-589e8f36e4a1"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.828780 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aec1692e-4b57-4c95-863f-589e8f36e4a1-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "aec1692e-4b57-4c95-863f-589e8f36e4a1" (UID: "aec1692e-4b57-4c95-863f-589e8f36e4a1"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.834804 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aec1692e-4b57-4c95-863f-589e8f36e4a1-kube-api-access-wskfl" (OuterVolumeSpecName: "kube-api-access-wskfl") pod "aec1692e-4b57-4c95-863f-589e8f36e4a1" (UID: "aec1692e-4b57-4c95-863f-589e8f36e4a1"). InnerVolumeSpecName "kube-api-access-wskfl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.840845 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-scripts" (OuterVolumeSpecName: "scripts") pod "aec1692e-4b57-4c95-863f-589e8f36e4a1" (UID: "aec1692e-4b57-4c95-863f-589e8f36e4a1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.918677 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "aec1692e-4b57-4c95-863f-589e8f36e4a1" (UID: "aec1692e-4b57-4c95-863f-589e8f36e4a1"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.919267 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aec1692e-4b57-4c95-863f-589e8f36e4a1" (UID: "aec1692e-4b57-4c95-863f-589e8f36e4a1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.925487 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-config-data" (OuterVolumeSpecName: "config-data") pod "aec1692e-4b57-4c95-863f-589e8f36e4a1" (UID: "aec1692e-4b57-4c95-863f-589e8f36e4a1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.929015 4793 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aec1692e-4b57-4c95-863f-589e8f36e4a1-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.929039 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.929049 4793 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.929058 4793 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aec1692e-4b57-4c95-863f-589e8f36e4a1-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.929068 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wskfl\" (UniqueName: \"kubernetes.io/projected/aec1692e-4b57-4c95-863f-589e8f36e4a1-kube-api-access-wskfl\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.929077 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:26:59 crc kubenswrapper[4793]: I0127 20:26:59.929085 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aec1692e-4b57-4c95-863f-589e8f36e4a1-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.689448 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.772685 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.793941 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.816820 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:27:00 crc kubenswrapper[4793]: E0127 20:27:00.817267 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aec1692e-4b57-4c95-863f-589e8f36e4a1" containerName="sg-core" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.817288 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="aec1692e-4b57-4c95-863f-589e8f36e4a1" containerName="sg-core" Jan 27 20:27:00 crc kubenswrapper[4793]: E0127 20:27:00.817329 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aec1692e-4b57-4c95-863f-589e8f36e4a1" containerName="ceilometer-notification-agent" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.817337 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="aec1692e-4b57-4c95-863f-589e8f36e4a1" containerName="ceilometer-notification-agent" Jan 27 20:27:00 crc kubenswrapper[4793]: E0127 20:27:00.817349 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aec1692e-4b57-4c95-863f-589e8f36e4a1" containerName="proxy-httpd" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.817354 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="aec1692e-4b57-4c95-863f-589e8f36e4a1" containerName="proxy-httpd" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.817517 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="aec1692e-4b57-4c95-863f-589e8f36e4a1" containerName="proxy-httpd" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.817531 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="aec1692e-4b57-4c95-863f-589e8f36e4a1" containerName="ceilometer-notification-agent" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.817539 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="aec1692e-4b57-4c95-863f-589e8f36e4a1" containerName="sg-core" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.819255 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.822851 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.823071 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.824732 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.885532 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-config-data\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.885696 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.885730 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.885768 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d5748384-5202-4d5e-9339-8a5cab24359d-log-httpd\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.885792 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zs2z4\" (UniqueName: \"kubernetes.io/projected/d5748384-5202-4d5e-9339-8a5cab24359d-kube-api-access-zs2z4\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.885851 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d5748384-5202-4d5e-9339-8a5cab24359d-run-httpd\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.885919 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-scripts\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.931795 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.986494 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-config-data\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.986630 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.986676 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.986712 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d5748384-5202-4d5e-9339-8a5cab24359d-log-httpd\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.986736 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zs2z4\" (UniqueName: \"kubernetes.io/projected/d5748384-5202-4d5e-9339-8a5cab24359d-kube-api-access-zs2z4\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.986779 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d5748384-5202-4d5e-9339-8a5cab24359d-run-httpd\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.986854 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-scripts\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.987217 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d5748384-5202-4d5e-9339-8a5cab24359d-log-httpd\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.987627 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d5748384-5202-4d5e-9339-8a5cab24359d-run-httpd\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.992335 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-scripts\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.992997 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:00 crc kubenswrapper[4793]: I0127 20:27:00.993298 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-config-data\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:01 crc kubenswrapper[4793]: I0127 20:27:01.005424 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:01 crc kubenswrapper[4793]: I0127 20:27:01.017398 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zs2z4\" (UniqueName: \"kubernetes.io/projected/d5748384-5202-4d5e-9339-8a5cab24359d-kube-api-access-zs2z4\") pod \"ceilometer-0\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") " pod="openstack/ceilometer-0" Jan 27 20:27:01 crc kubenswrapper[4793]: I0127 20:27:01.029214 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c9bc79ddc-g8sj2"] Jan 27 20:27:01 crc kubenswrapper[4793]: I0127 20:27:01.144030 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:27:01 crc kubenswrapper[4793]: I0127 20:27:01.697422 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" podUID="1ac23d66-3218-4159-9f23-87d2ab5078ed" containerName="dnsmasq-dns" containerID="cri-o://562aaf81315c39c43437775a7e79300ae359521ddd01223e9f707e04a06c06be" gracePeriod=10 Jan 27 20:27:01 crc kubenswrapper[4793]: I0127 20:27:01.766896 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:27:01 crc kubenswrapper[4793]: I0127 20:27:01.817100 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aec1692e-4b57-4c95-863f-589e8f36e4a1" path="/var/lib/kubelet/pods/aec1692e-4b57-4c95-863f-589e8f36e4a1/volumes" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.485489 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.677271 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-dns-svc\") pod \"1ac23d66-3218-4159-9f23-87d2ab5078ed\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.677731 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-config\") pod \"1ac23d66-3218-4159-9f23-87d2ab5078ed\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.677782 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-dns-swift-storage-0\") pod \"1ac23d66-3218-4159-9f23-87d2ab5078ed\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.677821 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-ovsdbserver-nb\") pod \"1ac23d66-3218-4159-9f23-87d2ab5078ed\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.677885 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25lc7\" (UniqueName: \"kubernetes.io/projected/1ac23d66-3218-4159-9f23-87d2ab5078ed-kube-api-access-25lc7\") pod \"1ac23d66-3218-4159-9f23-87d2ab5078ed\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.677967 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-ovsdbserver-sb\") pod \"1ac23d66-3218-4159-9f23-87d2ab5078ed\" (UID: \"1ac23d66-3218-4159-9f23-87d2ab5078ed\") " Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.724738 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ac23d66-3218-4159-9f23-87d2ab5078ed-kube-api-access-25lc7" (OuterVolumeSpecName: "kube-api-access-25lc7") pod "1ac23d66-3218-4159-9f23-87d2ab5078ed" (UID: "1ac23d66-3218-4159-9f23-87d2ab5078ed"). InnerVolumeSpecName "kube-api-access-25lc7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.768452 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1ac23d66-3218-4159-9f23-87d2ab5078ed" (UID: "1ac23d66-3218-4159-9f23-87d2ab5078ed"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.779804 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.779851 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25lc7\" (UniqueName: \"kubernetes.io/projected/1ac23d66-3218-4159-9f23-87d2ab5078ed-kube-api-access-25lc7\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.802816 4793 generic.go:334] "Generic (PLEG): container finished" podID="1ac23d66-3218-4159-9f23-87d2ab5078ed" containerID="562aaf81315c39c43437775a7e79300ae359521ddd01223e9f707e04a06c06be" exitCode=0 Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.802903 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" event={"ID":"1ac23d66-3218-4159-9f23-87d2ab5078ed","Type":"ContainerDied","Data":"562aaf81315c39c43437775a7e79300ae359521ddd01223e9f707e04a06c06be"} Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.802923 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" event={"ID":"1ac23d66-3218-4159-9f23-87d2ab5078ed","Type":"ContainerDied","Data":"57b95ec1249f52bdf8d191b10fecb5310273ea8d140cd6611e95079ea7f0c775"} Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.802946 4793 scope.go:117] "RemoveContainer" containerID="562aaf81315c39c43437775a7e79300ae359521ddd01223e9f707e04a06c06be" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.803063 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c9bc79ddc-g8sj2" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.829115 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d5748384-5202-4d5e-9339-8a5cab24359d","Type":"ContainerStarted","Data":"8dc8b4f68261a58f0ab83795e3b834111a882ca5c8361ea0cb2370e508af6ae8"} Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.829166 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d5748384-5202-4d5e-9339-8a5cab24359d","Type":"ContainerStarted","Data":"2e49489ae6508cce006d4df8bac509538cf9facd5875224089e7f9f4d246ad37"} Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.851966 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1ac23d66-3218-4159-9f23-87d2ab5078ed" (UID: "1ac23d66-3218-4159-9f23-87d2ab5078ed"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.861178 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1ac23d66-3218-4159-9f23-87d2ab5078ed" (UID: "1ac23d66-3218-4159-9f23-87d2ab5078ed"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.865641 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-config" (OuterVolumeSpecName: "config") pod "1ac23d66-3218-4159-9f23-87d2ab5078ed" (UID: "1ac23d66-3218-4159-9f23-87d2ab5078ed"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.891048 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.891100 4793 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.891114 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.898759 4793 scope.go:117] "RemoveContainer" containerID="b17c32d87128299a2bd7c304d43ffcf410fe5dfaf76a6d05933f2a61f367f6aa" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.899510 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1ac23d66-3218-4159-9f23-87d2ab5078ed" (UID: "1ac23d66-3218-4159-9f23-87d2ab5078ed"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.907292 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.917071 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-57fc549f96-h7nth" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.957054 4793 scope.go:117] "RemoveContainer" containerID="562aaf81315c39c43437775a7e79300ae359521ddd01223e9f707e04a06c06be" Jan 27 20:27:02 crc kubenswrapper[4793]: E0127 20:27:02.957566 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"562aaf81315c39c43437775a7e79300ae359521ddd01223e9f707e04a06c06be\": container with ID starting with 562aaf81315c39c43437775a7e79300ae359521ddd01223e9f707e04a06c06be not found: ID does not exist" containerID="562aaf81315c39c43437775a7e79300ae359521ddd01223e9f707e04a06c06be" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.957621 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"562aaf81315c39c43437775a7e79300ae359521ddd01223e9f707e04a06c06be"} err="failed to get container status \"562aaf81315c39c43437775a7e79300ae359521ddd01223e9f707e04a06c06be\": rpc error: code = NotFound desc = could not find container \"562aaf81315c39c43437775a7e79300ae359521ddd01223e9f707e04a06c06be\": container with ID starting with 562aaf81315c39c43437775a7e79300ae359521ddd01223e9f707e04a06c06be not found: ID does not exist" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.957650 4793 scope.go:117] "RemoveContainer" containerID="b17c32d87128299a2bd7c304d43ffcf410fe5dfaf76a6d05933f2a61f367f6aa" Jan 27 20:27:02 crc kubenswrapper[4793]: E0127 20:27:02.957913 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b17c32d87128299a2bd7c304d43ffcf410fe5dfaf76a6d05933f2a61f367f6aa\": container with ID starting with b17c32d87128299a2bd7c304d43ffcf410fe5dfaf76a6d05933f2a61f367f6aa not found: ID does not exist" containerID="b17c32d87128299a2bd7c304d43ffcf410fe5dfaf76a6d05933f2a61f367f6aa" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.957934 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b17c32d87128299a2bd7c304d43ffcf410fe5dfaf76a6d05933f2a61f367f6aa"} err="failed to get container status \"b17c32d87128299a2bd7c304d43ffcf410fe5dfaf76a6d05933f2a61f367f6aa\": rpc error: code = NotFound desc = could not find container \"b17c32d87128299a2bd7c304d43ffcf410fe5dfaf76a6d05933f2a61f367f6aa\": container with ID starting with b17c32d87128299a2bd7c304d43ffcf410fe5dfaf76a6d05933f2a61f367f6aa not found: ID does not exist" Jan 27 20:27:02 crc kubenswrapper[4793]: I0127 20:27:02.958584 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Jan 27 20:27:03 crc kubenswrapper[4793]: I0127 20:27:03.053231 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1ac23d66-3218-4159-9f23-87d2ab5078ed-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:03 crc kubenswrapper[4793]: I0127 20:27:03.142070 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c9bc79ddc-g8sj2"] Jan 27 20:27:03 crc kubenswrapper[4793]: I0127 20:27:03.153482 4793 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-c9bc79ddc-g8sj2"] Jan 27 20:27:03 crc kubenswrapper[4793]: I0127 20:27:03.242439 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 20:27:03 crc kubenswrapper[4793]: E0127 20:27:03.410754 4793 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod045591bb_dd8c_437e_9cf8_0e0b520fc49d.slice/crio-conmon-3135f6d30b9f8d64a130dfb58628947f2f7650a5a0eba624b0c1e61e689b4144.scope\": RecentStats: unable to find data in memory cache]" Jan 27 20:27:03 crc kubenswrapper[4793]: I0127 20:27:03.817169 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ac23d66-3218-4159-9f23-87d2ab5078ed" path="/var/lib/kubelet/pods/1ac23d66-3218-4159-9f23-87d2ab5078ed/volumes" Jan 27 20:27:04 crc kubenswrapper[4793]: I0127 20:27:04.062154 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="3135f6d30b9f8d64a130dfb58628947f2f7650a5a0eba624b0c1e61e689b4144" exitCode=1 Jan 27 20:27:04 crc kubenswrapper[4793]: I0127 20:27:04.062848 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"3135f6d30b9f8d64a130dfb58628947f2f7650a5a0eba624b0c1e61e689b4144"} Jan 27 20:27:04 crc kubenswrapper[4793]: I0127 20:27:04.062989 4793 scope.go:117] "RemoveContainer" containerID="5c33cdbef124884a6a9d4c47d9f48fb1775d0ce10f32393960f0040865c191ca" Jan 27 20:27:04 crc kubenswrapper[4793]: I0127 20:27:04.065725 4793 scope.go:117] "RemoveContainer" containerID="3135f6d30b9f8d64a130dfb58628947f2f7650a5a0eba624b0c1e61e689b4144" Jan 27 20:27:04 crc kubenswrapper[4793]: E0127 20:27:04.067532 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:27:04 crc kubenswrapper[4793]: I0127 20:27:04.078956 4793 generic.go:334] "Generic (PLEG): container finished" podID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerID="f7a8200b8c77c8e31ca3cc1869f57c5e8b46ff748a1f699d8e2f166aad3ad398" exitCode=1 Jan 27 20:27:04 crc kubenswrapper[4793]: I0127 20:27:04.079051 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"ffc46614-5f6d-40ad-a388-1ff326d22ee6","Type":"ContainerDied","Data":"f7a8200b8c77c8e31ca3cc1869f57c5e8b46ff748a1f699d8e2f166aad3ad398"} Jan 27 20:27:04 crc kubenswrapper[4793]: I0127 20:27:04.079853 4793 scope.go:117] "RemoveContainer" containerID="f7a8200b8c77c8e31ca3cc1869f57c5e8b46ff748a1f699d8e2f166aad3ad398" Jan 27 20:27:04 crc kubenswrapper[4793]: E0127 20:27:04.080092 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(ffc46614-5f6d-40ad-a388-1ff326d22ee6)\"" pod="openstack/watcher-decision-engine-0" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" Jan 27 20:27:04 crc kubenswrapper[4793]: I0127 20:27:04.092495 4793 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/ceilometer-0" event={"ID":"d5748384-5202-4d5e-9339-8a5cab24359d","Type":"ContainerStarted","Data":"b059262a1921cdefdb4e62cf235283090a344a49000a4e4d219675fe55c8c8f5"} Jan 27 20:27:04 crc kubenswrapper[4793]: I0127 20:27:04.109846 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Jan 27 20:27:04 crc kubenswrapper[4793]: I0127 20:27:04.165606 4793 scope.go:117] "RemoveContainer" containerID="b333c04d88dda9d3fc2438453ff950914a7e834343103e2beb967bc73e514926" Jan 27 20:27:04 crc kubenswrapper[4793]: I0127 20:27:04.944431 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 27 20:27:04 crc kubenswrapper[4793]: I0127 20:27:04.944848 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 27 20:27:04 crc kubenswrapper[4793]: I0127 20:27:04.991856 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 27 20:27:04 crc kubenswrapper[4793]: I0127 20:27:04.993905 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 27 20:27:05 crc kubenswrapper[4793]: I0127 20:27:05.126491 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d5748384-5202-4d5e-9339-8a5cab24359d","Type":"ContainerStarted","Data":"f9f61f96b9b70889d4ee24b1a5a03d616fbb327ee4e3093ca209f52c490c982c"} Jan 27 20:27:05 crc kubenswrapper[4793]: I0127 20:27:05.127194 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 27 20:27:05 crc kubenswrapper[4793]: I0127 20:27:05.127267 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 27 20:27:05 crc kubenswrapper[4793]: I0127 20:27:05.273990 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 27 20:27:05 crc kubenswrapper[4793]: I0127 20:27:05.274037 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 27 20:27:05 crc kubenswrapper[4793]: I0127 20:27:05.309871 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 27 20:27:05 crc kubenswrapper[4793]: I0127 20:27:05.349059 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 27 20:27:05 crc kubenswrapper[4793]: I0127 20:27:05.486916 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-57fc549f96-h7nth" Jan 27 20:27:05 crc kubenswrapper[4793]: I0127 20:27:05.558207 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-54fb8bbf88-42dqw"] Jan 27 20:27:05 crc kubenswrapper[4793]: I0127 20:27:05.558447 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-54fb8bbf88-42dqw" podUID="39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" containerName="horizon-log" containerID="cri-o://c3f58fb8d65ad0d715698cf7349fbd870ec40a6b9c8106589ae11c0ed0c8328e" gracePeriod=30 Jan 27 20:27:05 crc kubenswrapper[4793]: I0127 20:27:05.558721 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-54fb8bbf88-42dqw" 
podUID="39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" containerName="horizon" containerID="cri-o://98563a4eb2a696935e83a51b8bd4a46c15a10a9210e8057d81a360e02c721d48" gracePeriod=30 Jan 27 20:27:06 crc kubenswrapper[4793]: I0127 20:27:06.138724 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 27 20:27:06 crc kubenswrapper[4793]: I0127 20:27:06.138778 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 27 20:27:07 crc kubenswrapper[4793]: I0127 20:27:07.151269 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d5748384-5202-4d5e-9339-8a5cab24359d","Type":"ContainerStarted","Data":"797e0c8a0dd5f82b7a4ffe184a06800d3820b7b892ca0745baa7ac10658793f5"} Jan 27 20:27:07 crc kubenswrapper[4793]: I0127 20:27:07.151634 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 27 20:27:07 crc kubenswrapper[4793]: I0127 20:27:07.185632 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.738887787 podStartE2EDuration="7.185612154s" podCreationTimestamp="2026-01-27 20:27:00 +0000 UTC" firstStartedPulling="2026-01-27 20:27:01.765366521 +0000 UTC m=+1447.155619677" lastFinishedPulling="2026-01-27 20:27:06.212090888 +0000 UTC m=+1451.602344044" observedRunningTime="2026-01-27 20:27:07.175005133 +0000 UTC m=+1452.565258319" watchObservedRunningTime="2026-01-27 20:27:07.185612154 +0000 UTC m=+1452.575865310" Jan 27 20:27:08 crc kubenswrapper[4793]: I0127 20:27:08.056224 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 27 20:27:08 crc kubenswrapper[4793]: I0127 20:27:08.056599 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 27 20:27:08 crc kubenswrapper[4793]: I0127 20:27:08.057988 4793 scope.go:117] "RemoveContainer" containerID="f7a8200b8c77c8e31ca3cc1869f57c5e8b46ff748a1f699d8e2f166aad3ad398" Jan 27 20:27:08 crc kubenswrapper[4793]: E0127 20:27:08.058288 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(ffc46614-5f6d-40ad-a388-1ff326d22ee6)\"" pod="openstack/watcher-decision-engine-0" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" Jan 27 20:27:08 crc kubenswrapper[4793]: I0127 20:27:08.163552 4793 generic.go:334] "Generic (PLEG): container finished" podID="39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" containerID="98563a4eb2a696935e83a51b8bd4a46c15a10a9210e8057d81a360e02c721d48" exitCode=0 Jan 27 20:27:08 crc kubenswrapper[4793]: I0127 20:27:08.163604 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-54fb8bbf88-42dqw" event={"ID":"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a","Type":"ContainerDied","Data":"98563a4eb2a696935e83a51b8bd4a46c15a10a9210e8057d81a360e02c721d48"} Jan 27 20:27:08 crc kubenswrapper[4793]: I0127 20:27:08.242639 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:27:08 crc kubenswrapper[4793]: I0127 20:27:08.242689 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:27:08 crc kubenswrapper[4793]: I0127 
20:27:08.242701 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:27:08 crc kubenswrapper[4793]: I0127 20:27:08.243344 4793 scope.go:117] "RemoveContainer" containerID="3135f6d30b9f8d64a130dfb58628947f2f7650a5a0eba624b0c1e61e689b4144" Jan 27 20:27:08 crc kubenswrapper[4793]: E0127 20:27:08.243575 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:27:08 crc kubenswrapper[4793]: I0127 20:27:08.304924 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-988699dd4-wjjzw" Jan 27 20:27:08 crc kubenswrapper[4793]: I0127 20:27:08.619877 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 27 20:27:08 crc kubenswrapper[4793]: I0127 20:27:08.620054 4793 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 27 20:27:08 crc kubenswrapper[4793]: I0127 20:27:08.626088 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 27 20:27:09 crc kubenswrapper[4793]: I0127 20:27:09.176857 4793 generic.go:334] "Generic (PLEG): container finished" podID="64b480ab-f615-4d0b-9b56-ef6d0acf8955" containerID="ac49f5f979165c4e7a819a2acfcda5cc4faa76cf76e1f1272e99b26b776b3b8b" exitCode=0 Jan 27 20:27:09 crc kubenswrapper[4793]: I0127 20:27:09.176973 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-wlfpp" event={"ID":"64b480ab-f615-4d0b-9b56-ef6d0acf8955","Type":"ContainerDied","Data":"ac49f5f979165c4e7a819a2acfcda5cc4faa76cf76e1f1272e99b26b776b3b8b"} Jan 27 20:27:09 crc kubenswrapper[4793]: I0127 20:27:09.451398 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-54fb8bbf88-42dqw" podUID="39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.158:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.158:8443: connect: connection refused" Jan 27 20:27:10 crc kubenswrapper[4793]: I0127 20:27:10.012010 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 27 20:27:10 crc kubenswrapper[4793]: I0127 20:27:10.012170 4793 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 27 20:27:10 crc kubenswrapper[4793]: I0127 20:27:10.496991 4793 generic.go:334] "Generic (PLEG): container finished" podID="4ce6efd1-1d02-4e0c-bb44-3e2daac046bd" containerID="b57ae8118283dabfe483c25d17c0911a8fe523b9a5aa9409b7e40dc7832e2ab8" exitCode=0 Jan 27 20:27:10 crc kubenswrapper[4793]: I0127 20:27:10.497427 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-cthwx" event={"ID":"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd","Type":"ContainerDied","Data":"b57ae8118283dabfe483c25d17c0911a8fe523b9a5aa9409b7e40dc7832e2ab8"} Jan 27 20:27:10 crc kubenswrapper[4793]: I0127 20:27:10.789123 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 27 20:27:10 crc kubenswrapper[4793]: I0127 20:27:10.939542 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-wlfpp" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.197447 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgtxm\" (UniqueName: \"kubernetes.io/projected/64b480ab-f615-4d0b-9b56-ef6d0acf8955-kube-api-access-xgtxm\") pod \"64b480ab-f615-4d0b-9b56-ef6d0acf8955\" (UID: \"64b480ab-f615-4d0b-9b56-ef6d0acf8955\") " Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.197801 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/64b480ab-f615-4d0b-9b56-ef6d0acf8955-db-sync-config-data\") pod \"64b480ab-f615-4d0b-9b56-ef6d0acf8955\" (UID: \"64b480ab-f615-4d0b-9b56-ef6d0acf8955\") " Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.197899 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b480ab-f615-4d0b-9b56-ef6d0acf8955-combined-ca-bundle\") pod \"64b480ab-f615-4d0b-9b56-ef6d0acf8955\" (UID: \"64b480ab-f615-4d0b-9b56-ef6d0acf8955\") " Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.203968 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64b480ab-f615-4d0b-9b56-ef6d0acf8955-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "64b480ab-f615-4d0b-9b56-ef6d0acf8955" (UID: "64b480ab-f615-4d0b-9b56-ef6d0acf8955"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.222901 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64b480ab-f615-4d0b-9b56-ef6d0acf8955-kube-api-access-xgtxm" (OuterVolumeSpecName: "kube-api-access-xgtxm") pod "64b480ab-f615-4d0b-9b56-ef6d0acf8955" (UID: "64b480ab-f615-4d0b-9b56-ef6d0acf8955"). InnerVolumeSpecName "kube-api-access-xgtxm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.249894 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64b480ab-f615-4d0b-9b56-ef6d0acf8955-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "64b480ab-f615-4d0b-9b56-ef6d0acf8955" (UID: "64b480ab-f615-4d0b-9b56-ef6d0acf8955"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.299099 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgtxm\" (UniqueName: \"kubernetes.io/projected/64b480ab-f615-4d0b-9b56-ef6d0acf8955-kube-api-access-xgtxm\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.299141 4793 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/64b480ab-f615-4d0b-9b56-ef6d0acf8955-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.299152 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64b480ab-f615-4d0b-9b56-ef6d0acf8955-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.509154 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-wlfpp" event={"ID":"64b480ab-f615-4d0b-9b56-ef6d0acf8955","Type":"ContainerDied","Data":"5c19f9067c922241a4aa64fc432795cc88973be6f60782e1b5b1f25d22b537c4"} Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.509190 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-wlfpp" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.509208 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c19f9067c922241a4aa64fc432795cc88973be6f60782e1b5b1f25d22b537c4" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.741659 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-6f6f4d8b5c-97rd8"] Jan 27 20:27:11 crc kubenswrapper[4793]: E0127 20:27:11.742282 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ac23d66-3218-4159-9f23-87d2ab5078ed" containerName="dnsmasq-dns" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.742302 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ac23d66-3218-4159-9f23-87d2ab5078ed" containerName="dnsmasq-dns" Jan 27 20:27:11 crc kubenswrapper[4793]: E0127 20:27:11.742312 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ac23d66-3218-4159-9f23-87d2ab5078ed" containerName="init" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.742320 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ac23d66-3218-4159-9f23-87d2ab5078ed" containerName="init" Jan 27 20:27:11 crc kubenswrapper[4793]: E0127 20:27:11.742332 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64b480ab-f615-4d0b-9b56-ef6d0acf8955" containerName="barbican-db-sync" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.742340 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="64b480ab-f615-4d0b-9b56-ef6d0acf8955" containerName="barbican-db-sync" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.742635 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="64b480ab-f615-4d0b-9b56-ef6d0acf8955" containerName="barbican-db-sync" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.742653 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ac23d66-3218-4159-9f23-87d2ab5078ed" containerName="dnsmasq-dns" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.743983 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.759895 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-lwxzs" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.759975 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.761299 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.767624 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-547c9dc95d-r6k22"] Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.779380 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.784803 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.796084 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6f6f4d8b5c-97rd8"] Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.813633 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6139a6f4-f2b8-48f3-8997-e560f4deb75f-config-data-custom\") pod \"barbican-worker-6f6f4d8b5c-97rd8\" (UID: \"6139a6f4-f2b8-48f3-8997-e560f4deb75f\") " pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.813680 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d9caabf-4fb7-4374-966f-27ca72ed8ad3-combined-ca-bundle\") pod \"barbican-keystone-listener-547c9dc95d-r6k22\" (UID: \"3d9caabf-4fb7-4374-966f-27ca72ed8ad3\") " pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.813717 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3d9caabf-4fb7-4374-966f-27ca72ed8ad3-config-data-custom\") pod \"barbican-keystone-listener-547c9dc95d-r6k22\" (UID: \"3d9caabf-4fb7-4374-966f-27ca72ed8ad3\") " pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.813737 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csmk6\" (UniqueName: \"kubernetes.io/projected/6139a6f4-f2b8-48f3-8997-e560f4deb75f-kube-api-access-csmk6\") pod \"barbican-worker-6f6f4d8b5c-97rd8\" (UID: \"6139a6f4-f2b8-48f3-8997-e560f4deb75f\") " pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.813770 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6139a6f4-f2b8-48f3-8997-e560f4deb75f-combined-ca-bundle\") pod \"barbican-worker-6f6f4d8b5c-97rd8\" (UID: \"6139a6f4-f2b8-48f3-8997-e560f4deb75f\") " pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.813787 4793 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d9caabf-4fb7-4374-966f-27ca72ed8ad3-logs\") pod \"barbican-keystone-listener-547c9dc95d-r6k22\" (UID: \"3d9caabf-4fb7-4374-966f-27ca72ed8ad3\") " pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.813846 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6139a6f4-f2b8-48f3-8997-e560f4deb75f-logs\") pod \"barbican-worker-6f6f4d8b5c-97rd8\" (UID: \"6139a6f4-f2b8-48f3-8997-e560f4deb75f\") " pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.813861 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6139a6f4-f2b8-48f3-8997-e560f4deb75f-config-data\") pod \"barbican-worker-6f6f4d8b5c-97rd8\" (UID: \"6139a6f4-f2b8-48f3-8997-e560f4deb75f\") " pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.813881 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t44m7\" (UniqueName: \"kubernetes.io/projected/3d9caabf-4fb7-4374-966f-27ca72ed8ad3-kube-api-access-t44m7\") pod \"barbican-keystone-listener-547c9dc95d-r6k22\" (UID: \"3d9caabf-4fb7-4374-966f-27ca72ed8ad3\") " pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.813905 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d9caabf-4fb7-4374-966f-27ca72ed8ad3-config-data\") pod \"barbican-keystone-listener-547c9dc95d-r6k22\" (UID: \"3d9caabf-4fb7-4374-966f-27ca72ed8ad3\") " pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.851645 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-547c9dc95d-r6k22"] Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.920805 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6139a6f4-f2b8-48f3-8997-e560f4deb75f-config-data\") pod \"barbican-worker-6f6f4d8b5c-97rd8\" (UID: \"6139a6f4-f2b8-48f3-8997-e560f4deb75f\") " pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.921182 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6139a6f4-f2b8-48f3-8997-e560f4deb75f-logs\") pod \"barbican-worker-6f6f4d8b5c-97rd8\" (UID: \"6139a6f4-f2b8-48f3-8997-e560f4deb75f\") " pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.921226 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t44m7\" (UniqueName: \"kubernetes.io/projected/3d9caabf-4fb7-4374-966f-27ca72ed8ad3-kube-api-access-t44m7\") pod \"barbican-keystone-listener-547c9dc95d-r6k22\" (UID: \"3d9caabf-4fb7-4374-966f-27ca72ed8ad3\") " pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.921266 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/3d9caabf-4fb7-4374-966f-27ca72ed8ad3-config-data\") pod \"barbican-keystone-listener-547c9dc95d-r6k22\" (UID: \"3d9caabf-4fb7-4374-966f-27ca72ed8ad3\") " pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.921329 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6139a6f4-f2b8-48f3-8997-e560f4deb75f-config-data-custom\") pod \"barbican-worker-6f6f4d8b5c-97rd8\" (UID: \"6139a6f4-f2b8-48f3-8997-e560f4deb75f\") " pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.921363 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d9caabf-4fb7-4374-966f-27ca72ed8ad3-combined-ca-bundle\") pod \"barbican-keystone-listener-547c9dc95d-r6k22\" (UID: \"3d9caabf-4fb7-4374-966f-27ca72ed8ad3\") " pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.921406 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3d9caabf-4fb7-4374-966f-27ca72ed8ad3-config-data-custom\") pod \"barbican-keystone-listener-547c9dc95d-r6k22\" (UID: \"3d9caabf-4fb7-4374-966f-27ca72ed8ad3\") " pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.921441 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csmk6\" (UniqueName: \"kubernetes.io/projected/6139a6f4-f2b8-48f3-8997-e560f4deb75f-kube-api-access-csmk6\") pod \"barbican-worker-6f6f4d8b5c-97rd8\" (UID: \"6139a6f4-f2b8-48f3-8997-e560f4deb75f\") " pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.921510 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6139a6f4-f2b8-48f3-8997-e560f4deb75f-combined-ca-bundle\") pod \"barbican-worker-6f6f4d8b5c-97rd8\" (UID: \"6139a6f4-f2b8-48f3-8997-e560f4deb75f\") " pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.921536 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d9caabf-4fb7-4374-966f-27ca72ed8ad3-logs\") pod \"barbican-keystone-listener-547c9dc95d-r6k22\" (UID: \"3d9caabf-4fb7-4374-966f-27ca72ed8ad3\") " pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.921729 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6139a6f4-f2b8-48f3-8997-e560f4deb75f-logs\") pod \"barbican-worker-6f6f4d8b5c-97rd8\" (UID: \"6139a6f4-f2b8-48f3-8997-e560f4deb75f\") " pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.922080 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d9caabf-4fb7-4374-966f-27ca72ed8ad3-logs\") pod \"barbican-keystone-listener-547c9dc95d-r6k22\" (UID: \"3d9caabf-4fb7-4374-966f-27ca72ed8ad3\") " pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.932448 
4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6139a6f4-f2b8-48f3-8997-e560f4deb75f-config-data\") pod \"barbican-worker-6f6f4d8b5c-97rd8\" (UID: \"6139a6f4-f2b8-48f3-8997-e560f4deb75f\") " pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.943242 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6139a6f4-f2b8-48f3-8997-e560f4deb75f-config-data-custom\") pod \"barbican-worker-6f6f4d8b5c-97rd8\" (UID: \"6139a6f4-f2b8-48f3-8997-e560f4deb75f\") " pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.969447 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t44m7\" (UniqueName: \"kubernetes.io/projected/3d9caabf-4fb7-4374-966f-27ca72ed8ad3-kube-api-access-t44m7\") pod \"barbican-keystone-listener-547c9dc95d-r6k22\" (UID: \"3d9caabf-4fb7-4374-966f-27ca72ed8ad3\") " pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.970865 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csmk6\" (UniqueName: \"kubernetes.io/projected/6139a6f4-f2b8-48f3-8997-e560f4deb75f-kube-api-access-csmk6\") pod \"barbican-worker-6f6f4d8b5c-97rd8\" (UID: \"6139a6f4-f2b8-48f3-8997-e560f4deb75f\") " pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.971928 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6139a6f4-f2b8-48f3-8997-e560f4deb75f-combined-ca-bundle\") pod \"barbican-worker-6f6f4d8b5c-97rd8\" (UID: \"6139a6f4-f2b8-48f3-8997-e560f4deb75f\") " pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.972331 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d9caabf-4fb7-4374-966f-27ca72ed8ad3-config-data\") pod \"barbican-keystone-listener-547c9dc95d-r6k22\" (UID: \"3d9caabf-4fb7-4374-966f-27ca72ed8ad3\") " pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.973356 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3d9caabf-4fb7-4374-966f-27ca72ed8ad3-config-data-custom\") pod \"barbican-keystone-listener-547c9dc95d-r6k22\" (UID: \"3d9caabf-4fb7-4374-966f-27ca72ed8ad3\") " pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:11 crc kubenswrapper[4793]: I0127 20:27:11.984012 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d9caabf-4fb7-4374-966f-27ca72ed8ad3-combined-ca-bundle\") pod \"barbican-keystone-listener-547c9dc95d-r6k22\" (UID: \"3d9caabf-4fb7-4374-966f-27ca72ed8ad3\") " pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.074758 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7994fb8dd5-jsx4c"] Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.076745 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.085454 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.114065 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-cthwx" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.140901 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.156746 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7994fb8dd5-jsx4c"] Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.192701 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-656c4f975d-cgft5"] Jan 27 20:27:12 crc kubenswrapper[4793]: E0127 20:27:12.193340 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ce6efd1-1d02-4e0c-bb44-3e2daac046bd" containerName="neutron-db-sync" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.193358 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ce6efd1-1d02-4e0c-bb44-3e2daac046bd" containerName="neutron-db-sync" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.193661 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ce6efd1-1d02-4e0c-bb44-3e2daac046bd" containerName="neutron-db-sync" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.391368 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.392409 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-config\") pod \"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd\" (UID: \"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd\") " Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.392472 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqx5g\" (UniqueName: \"kubernetes.io/projected/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-kube-api-access-bqx5g\") pod \"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd\" (UID: \"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd\") " Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.392636 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-combined-ca-bundle\") pod \"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd\" (UID: \"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd\") " Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.393074 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-config\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.393139 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-ovsdbserver-sb\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: 
\"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.393243 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-dns-svc\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.393265 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-dns-swift-storage-0\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.393281 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q622q\" (UniqueName: \"kubernetes.io/projected/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-kube-api-access-q622q\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.393324 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-ovsdbserver-nb\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.413640 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.421886 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-kube-api-access-bqx5g" (OuterVolumeSpecName: "kube-api-access-bqx5g") pod "4ce6efd1-1d02-4e0c-bb44-3e2daac046bd" (UID: "4ce6efd1-1d02-4e0c-bb44-3e2daac046bd"). InnerVolumeSpecName "kube-api-access-bqx5g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.431753 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-656c4f975d-cgft5"] Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.433755 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4ce6efd1-1d02-4e0c-bb44-3e2daac046bd" (UID: "4ce6efd1-1d02-4e0c-bb44-3e2daac046bd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.485454 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-config" (OuterVolumeSpecName: "config") pod "4ce6efd1-1d02-4e0c-bb44-3e2daac046bd" (UID: "4ce6efd1-1d02-4e0c-bb44-3e2daac046bd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.500429 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-dns-svc\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.500494 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-dns-swift-storage-0\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.500514 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q622q\" (UniqueName: \"kubernetes.io/projected/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-kube-api-access-q622q\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.500596 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-ovsdbserver-nb\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.500625 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-config\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.500700 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-combined-ca-bundle\") pod \"barbican-api-656c4f975d-cgft5\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") " pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.500721 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-ovsdbserver-sb\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.500748 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9zlr\" (UniqueName: \"kubernetes.io/projected/c29ca30f-5826-4ff4-a916-0a20b9ec887e-kube-api-access-g9zlr\") pod \"barbican-api-656c4f975d-cgft5\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") " pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.500781 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-config-data\") pod \"barbican-api-656c4f975d-cgft5\" (UID: 
\"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") " pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.500818 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c29ca30f-5826-4ff4-a916-0a20b9ec887e-logs\") pod \"barbican-api-656c4f975d-cgft5\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") " pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.500882 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-config-data-custom\") pod \"barbican-api-656c4f975d-cgft5\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") " pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.500935 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.500946 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqx5g\" (UniqueName: \"kubernetes.io/projected/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-kube-api-access-bqx5g\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.500957 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.502271 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-dns-svc\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.502266 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-dns-swift-storage-0\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.502903 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-ovsdbserver-sb\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.503363 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-ovsdbserver-nb\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.505225 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-config\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " 
pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.523409 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q622q\" (UniqueName: \"kubernetes.io/projected/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-kube-api-access-q622q\") pod \"dnsmasq-dns-7994fb8dd5-jsx4c\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.538256 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-cthwx" event={"ID":"4ce6efd1-1d02-4e0c-bb44-3e2daac046bd","Type":"ContainerDied","Data":"7cdbb2825f835a68f7995d57518b95f2093381377aa4e01edc7882bb557ce45a"} Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.538299 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7cdbb2825f835a68f7995d57518b95f2093381377aa4e01edc7882bb557ce45a" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.538360 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-cthwx" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.608771 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-combined-ca-bundle\") pod \"barbican-api-656c4f975d-cgft5\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") " pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.608829 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9zlr\" (UniqueName: \"kubernetes.io/projected/c29ca30f-5826-4ff4-a916-0a20b9ec887e-kube-api-access-g9zlr\") pod \"barbican-api-656c4f975d-cgft5\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") " pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.608855 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-config-data\") pod \"barbican-api-656c4f975d-cgft5\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") " pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.608889 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c29ca30f-5826-4ff4-a916-0a20b9ec887e-logs\") pod \"barbican-api-656c4f975d-cgft5\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") " pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.608939 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-config-data-custom\") pod \"barbican-api-656c4f975d-cgft5\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") " pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.614189 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c29ca30f-5826-4ff4-a916-0a20b9ec887e-logs\") pod \"barbican-api-656c4f975d-cgft5\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") " pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.623348 4793 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-config-data-custom\") pod \"barbican-api-656c4f975d-cgft5\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") " pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.636732 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-combined-ca-bundle\") pod \"barbican-api-656c4f975d-cgft5\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") " pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.637928 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-config-data\") pod \"barbican-api-656c4f975d-cgft5\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") " pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.667503 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9zlr\" (UniqueName: \"kubernetes.io/projected/c29ca30f-5826-4ff4-a916-0a20b9ec887e-kube-api-access-g9zlr\") pod \"barbican-api-656c4f975d-cgft5\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") " pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.743083 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.834416 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:12 crc kubenswrapper[4793]: I0127 20:27:12.923224 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7994fb8dd5-jsx4c"] Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.022618 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-64fb588b47-pjswh"] Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.025498 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.040033 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-ovsdbserver-sb\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.040111 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqvxm\" (UniqueName: \"kubernetes.io/projected/ff279e02-ebb8-4000-8a22-309165534f9c-kube-api-access-fqvxm\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.040166 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-ovsdbserver-nb\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.040203 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-dns-swift-storage-0\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.040239 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-dns-svc\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.040257 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-config\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.046009 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64fb588b47-pjswh"] Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.391493 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqvxm\" (UniqueName: \"kubernetes.io/projected/ff279e02-ebb8-4000-8a22-309165534f9c-kube-api-access-fqvxm\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.401293 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-ovsdbserver-nb\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.401548 4793 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-dns-swift-storage-0\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.401746 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-dns-svc\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.401918 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-config\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.402162 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-ovsdbserver-sb\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.403442 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-ovsdbserver-sb\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.404225 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-ovsdbserver-nb\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.404620 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-dns-svc\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.405223 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-config\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.405834 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-dns-swift-storage-0\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.473769 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqvxm\" (UniqueName: 
\"kubernetes.io/projected/ff279e02-ebb8-4000-8a22-309165534f9c-kube-api-access-fqvxm\") pod \"dnsmasq-dns-64fb588b47-pjswh\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.493578 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5c88b98b88-ttglp"] Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.495424 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.502162 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.502416 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.502615 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.504445 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-wfnzb" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.533832 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5c88b98b88-ttglp"] Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.608841 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-config\") pod \"neutron-5c88b98b88-ttglp\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.608959 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-httpd-config\") pod \"neutron-5c88b98b88-ttglp\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.608991 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-combined-ca-bundle\") pod \"neutron-5c88b98b88-ttglp\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.609055 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-ovndb-tls-certs\") pod \"neutron-5c88b98b88-ttglp\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.609077 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5447\" (UniqueName: \"kubernetes.io/projected/630adb3c-5213-4815-81d3-9cfd7948e790-kube-api-access-v5447\") pod \"neutron-5c88b98b88-ttglp\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.688492 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.699185 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6f6f4d8b5c-97rd8"] Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.712364 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-config\") pod \"neutron-5c88b98b88-ttglp\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.712469 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-httpd-config\") pod \"neutron-5c88b98b88-ttglp\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.712519 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-combined-ca-bundle\") pod \"neutron-5c88b98b88-ttglp\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.712735 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-ovndb-tls-certs\") pod \"neutron-5c88b98b88-ttglp\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.712782 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5447\" (UniqueName: \"kubernetes.io/projected/630adb3c-5213-4815-81d3-9cfd7948e790-kube-api-access-v5447\") pod \"neutron-5c88b98b88-ttglp\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.745051 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-ovndb-tls-certs\") pod \"neutron-5c88b98b88-ttglp\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.745498 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-547c9dc95d-r6k22"] Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.759070 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-config\") pod \"neutron-5c88b98b88-ttglp\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.826889 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-httpd-config\") pod \"neutron-5c88b98b88-ttglp\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.826914 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-v5447\" (UniqueName: \"kubernetes.io/projected/630adb3c-5213-4815-81d3-9cfd7948e790-kube-api-access-v5447\") pod \"neutron-5c88b98b88-ttglp\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.833494 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-combined-ca-bundle\") pod \"neutron-5c88b98b88-ttglp\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.864648 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:13 crc kubenswrapper[4793]: I0127 20:27:13.900100 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7994fb8dd5-jsx4c"] Jan 27 20:27:14 crc kubenswrapper[4793]: I0127 20:27:14.409224 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-656c4f975d-cgft5"] Jan 27 20:27:14 crc kubenswrapper[4793]: I0127 20:27:14.647475 4793 generic.go:334] "Generic (PLEG): container finished" podID="a07ca8f7-3387-4f58-a094-26d491028752" containerID="32c84a319d62d23053311a2833bc78d2a80cea5c80329bf5e109babdcb818bb4" exitCode=0 Jan 27 20:27:14 crc kubenswrapper[4793]: I0127 20:27:14.647650 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-cql8q" event={"ID":"a07ca8f7-3387-4f58-a094-26d491028752","Type":"ContainerDied","Data":"32c84a319d62d23053311a2833bc78d2a80cea5c80329bf5e109babdcb818bb4"} Jan 27 20:27:14 crc kubenswrapper[4793]: I0127 20:27:14.662767 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-656c4f975d-cgft5" event={"ID":"c29ca30f-5826-4ff4-a916-0a20b9ec887e","Type":"ContainerStarted","Data":"7b6f2dada7cddcd5041d18d829fdecc54c55ce6ef2a047ad7120eaf7a1484eaa"} Jan 27 20:27:14 crc kubenswrapper[4793]: I0127 20:27:14.668851 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" event={"ID":"6139a6f4-f2b8-48f3-8997-e560f4deb75f","Type":"ContainerStarted","Data":"cc00a5b3f5c1f2ad9fd6c89ff006d45bde35f20d65e9f82dc564c04e14865650"} Jan 27 20:27:14 crc kubenswrapper[4793]: I0127 20:27:14.680966 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" event={"ID":"ccb3c627-2c29-46e6-91ab-083d3edc3eb6","Type":"ContainerStarted","Data":"1a22e8b28b59e85605e0aeb4ec43d7721bdaa0ed0d68069648c080cbe7275da9"} Jan 27 20:27:14 crc kubenswrapper[4793]: I0127 20:27:14.696230 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" event={"ID":"3d9caabf-4fb7-4374-966f-27ca72ed8ad3","Type":"ContainerStarted","Data":"31fee5ba6adde80dfda7e2b44e703ac8511d0b78e310ee5ca4145e80354d9565"} Jan 27 20:27:14 crc kubenswrapper[4793]: I0127 20:27:14.711777 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/horizon-57fc549f96-h7nth" podUID="598878f3-c1fc-481f-ad69-dacba44a1ccc" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.159:8443/dashboard/auth/login/?next=/dashboard/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:27:14 crc kubenswrapper[4793]: I0127 20:27:14.711839 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-57fc549f96-h7nth" 
podUID="598878f3-c1fc-481f-ad69-dacba44a1ccc" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.159:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 20:27:15 crc kubenswrapper[4793]: I0127 20:27:15.390341 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64fb588b47-pjswh"] Jan 27 20:27:16 crc kubenswrapper[4793]: I0127 20:27:16.210825 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-656c4f975d-cgft5" event={"ID":"c29ca30f-5826-4ff4-a916-0a20b9ec887e","Type":"ContainerStarted","Data":"28d31860dcd7d9feb0eba4bb17d710a25b8d8dba4d4dadf99ea961f22ff176b1"} Jan 27 20:27:16 crc kubenswrapper[4793]: I0127 20:27:16.244849 4793 generic.go:334] "Generic (PLEG): container finished" podID="ccb3c627-2c29-46e6-91ab-083d3edc3eb6" containerID="84b4ba4633250c170993546b6f1276520e7994c7033979494463375ee9928bae" exitCode=0 Jan 27 20:27:16 crc kubenswrapper[4793]: I0127 20:27:16.244936 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" event={"ID":"ccb3c627-2c29-46e6-91ab-083d3edc3eb6","Type":"ContainerDied","Data":"84b4ba4633250c170993546b6f1276520e7994c7033979494463375ee9928bae"} Jan 27 20:27:16 crc kubenswrapper[4793]: I0127 20:27:16.256306 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64fb588b47-pjswh" event={"ID":"ff279e02-ebb8-4000-8a22-309165534f9c","Type":"ContainerStarted","Data":"20af7512b6f77fc6262d53bc913c2c738bf428da9549bd8648a1f952232567fb"} Jan 27 20:27:16 crc kubenswrapper[4793]: I0127 20:27:16.353766 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5c88b98b88-ttglp"] Jan 27 20:27:16 crc kubenswrapper[4793]: W0127 20:27:16.360841 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod630adb3c_5213_4815_81d3_9cfd7948e790.slice/crio-56c4cb6f6198ece6e54fb7ff743d6cad8ff47169e1f6cee07ef5568ce1dbf3e9 WatchSource:0}: Error finding container 56c4cb6f6198ece6e54fb7ff743d6cad8ff47169e1f6cee07ef5568ce1dbf3e9: Status 404 returned error can't find the container with id 56c4cb6f6198ece6e54fb7ff743d6cad8ff47169e1f6cee07ef5568ce1dbf3e9 Jan 27 20:27:16 crc kubenswrapper[4793]: I0127 20:27:16.977620 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.101094 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-dns-svc\") pod \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.101211 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-ovsdbserver-sb\") pod \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.101269 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-config\") pod \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.101343 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-ovsdbserver-nb\") pod \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.101447 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q622q\" (UniqueName: \"kubernetes.io/projected/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-kube-api-access-q622q\") pod \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.101473 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-dns-swift-storage-0\") pod \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\" (UID: \"ccb3c627-2c29-46e6-91ab-083d3edc3eb6\") " Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.154046 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-kube-api-access-q622q" (OuterVolumeSpecName: "kube-api-access-q622q") pod "ccb3c627-2c29-46e6-91ab-083d3edc3eb6" (UID: "ccb3c627-2c29-46e6-91ab-083d3edc3eb6"). InnerVolumeSpecName "kube-api-access-q622q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.161090 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ccb3c627-2c29-46e6-91ab-083d3edc3eb6" (UID: "ccb3c627-2c29-46e6-91ab-083d3edc3eb6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.203980 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-config" (OuterVolumeSpecName: "config") pod "ccb3c627-2c29-46e6-91ab-083d3edc3eb6" (UID: "ccb3c627-2c29-46e6-91ab-083d3edc3eb6"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.207097 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q622q\" (UniqueName: \"kubernetes.io/projected/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-kube-api-access-q622q\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.207131 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.207141 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.216413 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ccb3c627-2c29-46e6-91ab-083d3edc3eb6" (UID: "ccb3c627-2c29-46e6-91ab-083d3edc3eb6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.235845 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ccb3c627-2c29-46e6-91ab-083d3edc3eb6" (UID: "ccb3c627-2c29-46e6-91ab-083d3edc3eb6"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.236655 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ccb3c627-2c29-46e6-91ab-083d3edc3eb6" (UID: "ccb3c627-2c29-46e6-91ab-083d3edc3eb6"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.258985 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-cql8q" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.302613 4793 generic.go:334] "Generic (PLEG): container finished" podID="ff279e02-ebb8-4000-8a22-309165534f9c" containerID="690880a6c076b73a88d601ceb6f5272c4d0fc83a859281af9f4411a81926f20a" exitCode=0 Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.302690 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64fb588b47-pjswh" event={"ID":"ff279e02-ebb8-4000-8a22-309165534f9c","Type":"ContainerDied","Data":"690880a6c076b73a88d601ceb6f5272c4d0fc83a859281af9f4411a81926f20a"} Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.312324 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" event={"ID":"ccb3c627-2c29-46e6-91ab-083d3edc3eb6","Type":"ContainerDied","Data":"1a22e8b28b59e85605e0aeb4ec43d7721bdaa0ed0d68069648c080cbe7275da9"} Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.312376 4793 scope.go:117] "RemoveContainer" containerID="84b4ba4633250c170993546b6f1276520e7994c7033979494463375ee9928bae" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.312499 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7994fb8dd5-jsx4c" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.314706 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.316802 4793 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.316817 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ccb3c627-2c29-46e6-91ab-083d3edc3eb6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.361518 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-cql8q" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.361624 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-cql8q" event={"ID":"a07ca8f7-3387-4f58-a094-26d491028752","Type":"ContainerDied","Data":"6e24ee1f0e9e9b4b0d47e713da4001e7242b399c62590670ce363cd4cdb068ec"} Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.361677 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6e24ee1f0e9e9b4b0d47e713da4001e7242b399c62590670ce363cd4cdb068ec" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.376037 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c88b98b88-ttglp" event={"ID":"630adb3c-5213-4815-81d3-9cfd7948e790","Type":"ContainerStarted","Data":"56c4cb6f6198ece6e54fb7ff743d6cad8ff47169e1f6cee07ef5568ce1dbf3e9"} Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.385114 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-656c4f975d-cgft5" event={"ID":"c29ca30f-5826-4ff4-a916-0a20b9ec887e","Type":"ContainerStarted","Data":"5dcc822ece0ffdf34ee771e2fec72acdba64ad5fed784c7ea1c67126ba54dc5f"} Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.386146 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.386188 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.418652 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-combined-ca-bundle\") pod \"a07ca8f7-3387-4f58-a094-26d491028752\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.419233 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-config-data\") pod \"a07ca8f7-3387-4f58-a094-26d491028752\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.419265 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-scripts\") pod 
\"a07ca8f7-3387-4f58-a094-26d491028752\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.419367 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-db-sync-config-data\") pod \"a07ca8f7-3387-4f58-a094-26d491028752\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.419389 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pvc7f\" (UniqueName: \"kubernetes.io/projected/a07ca8f7-3387-4f58-a094-26d491028752-kube-api-access-pvc7f\") pod \"a07ca8f7-3387-4f58-a094-26d491028752\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.419408 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a07ca8f7-3387-4f58-a094-26d491028752-etc-machine-id\") pod \"a07ca8f7-3387-4f58-a094-26d491028752\" (UID: \"a07ca8f7-3387-4f58-a094-26d491028752\") " Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.423102 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-656c4f975d-cgft5" podStartSLOduration=5.423078219 podStartE2EDuration="5.423078219s" podCreationTimestamp="2026-01-27 20:27:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:27:17.410218633 +0000 UTC m=+1462.800471789" watchObservedRunningTime="2026-01-27 20:27:17.423078219 +0000 UTC m=+1462.813331375" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.423699 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a07ca8f7-3387-4f58-a094-26d491028752-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "a07ca8f7-3387-4f58-a094-26d491028752" (UID: "a07ca8f7-3387-4f58-a094-26d491028752"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.426516 4793 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a07ca8f7-3387-4f58-a094-26d491028752-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.427354 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-scripts" (OuterVolumeSpecName: "scripts") pod "a07ca8f7-3387-4f58-a094-26d491028752" (UID: "a07ca8f7-3387-4f58-a094-26d491028752"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.436248 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "a07ca8f7-3387-4f58-a094-26d491028752" (UID: "a07ca8f7-3387-4f58-a094-26d491028752"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.761108 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.761132 4793 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.762831 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a07ca8f7-3387-4f58-a094-26d491028752-kube-api-access-pvc7f" (OuterVolumeSpecName: "kube-api-access-pvc7f") pod "a07ca8f7-3387-4f58-a094-26d491028752" (UID: "a07ca8f7-3387-4f58-a094-26d491028752"). InnerVolumeSpecName "kube-api-access-pvc7f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.792783 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a07ca8f7-3387-4f58-a094-26d491028752" (UID: "a07ca8f7-3387-4f58-a094-26d491028752"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.832270 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-config-data" (OuterVolumeSpecName: "config-data") pod "a07ca8f7-3387-4f58-a094-26d491028752" (UID: "a07ca8f7-3387-4f58-a094-26d491028752"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.864866 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.865078 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a07ca8f7-3387-4f58-a094-26d491028752-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.865090 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pvc7f\" (UniqueName: \"kubernetes.io/projected/a07ca8f7-3387-4f58-a094-26d491028752-kube-api-access-pvc7f\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:17 crc kubenswrapper[4793]: I0127 20:27:17.984679 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7994fb8dd5-jsx4c"] Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.009037 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7994fb8dd5-jsx4c"] Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.397409 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c88b98b88-ttglp" event={"ID":"630adb3c-5213-4815-81d3-9cfd7948e790","Type":"ContainerStarted","Data":"a01259b78e0c432076efad3fedb676023d94b82a4619bb4b946f66ddb34ca89d"} Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.397452 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c88b98b88-ttglp" event={"ID":"630adb3c-5213-4815-81d3-9cfd7948e790","Type":"ContainerStarted","Data":"2970644cb6d048b4d227cb4622856bea76921737325f04924bec542372eb3d62"} Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.397607 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.408378 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64fb588b47-pjswh" event={"ID":"ff279e02-ebb8-4000-8a22-309165534f9c","Type":"ContainerStarted","Data":"cbb595389c24c858d98b4912a9098004e3a311a506125acb92a1618fa34f3db6"} Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.408698 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.647108 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5c88b98b88-ttglp" podStartSLOduration=5.647077581 podStartE2EDuration="5.647077581s" podCreationTimestamp="2026-01-27 20:27:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:27:18.416004032 +0000 UTC m=+1463.806257188" watchObservedRunningTime="2026-01-27 20:27:18.647077581 +0000 UTC m=+1464.037330737" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.758832 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-64fb588b47-pjswh" podStartSLOduration=6.758806217 podStartE2EDuration="6.758806217s" podCreationTimestamp="2026-01-27 20:27:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:27:18.678220837 +0000 UTC m=+1464.068474003" 
watchObservedRunningTime="2026-01-27 20:27:18.758806217 +0000 UTC m=+1464.149059373" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.774617 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 20:27:18 crc kubenswrapper[4793]: E0127 20:27:18.775138 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a07ca8f7-3387-4f58-a094-26d491028752" containerName="cinder-db-sync" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.775159 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="a07ca8f7-3387-4f58-a094-26d491028752" containerName="cinder-db-sync" Jan 27 20:27:18 crc kubenswrapper[4793]: E0127 20:27:18.775172 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccb3c627-2c29-46e6-91ab-083d3edc3eb6" containerName="init" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.775179 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccb3c627-2c29-46e6-91ab-083d3edc3eb6" containerName="init" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.779901 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="a07ca8f7-3387-4f58-a094-26d491028752" containerName="cinder-db-sync" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.779974 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccb3c627-2c29-46e6-91ab-083d3edc3eb6" containerName="init" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.781257 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.788424 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.788481 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.788713 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.789059 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-4mmj9" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.796811 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.858457 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64fb588b47-pjswh"] Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.906099 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57459cb9f9-4xdq5"] Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.916029 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.958271 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57459cb9f9-4xdq5"] Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.985153 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.985290 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h59wd\" (UniqueName: \"kubernetes.io/projected/b604f471-77da-415e-ab02-1ec4a2b6a56c-kube-api-access-h59wd\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.985366 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-scripts\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.985451 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b604f471-77da-415e-ab02-1ec4a2b6a56c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.985487 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:18 crc kubenswrapper[4793]: I0127 20:27:18.985633 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-config-data\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.093587 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.093661 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-ovsdbserver-sb\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.093702 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-dns-svc\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.093736 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-dns-swift-storage-0\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.093785 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h59wd\" (UniqueName: \"kubernetes.io/projected/b604f471-77da-415e-ab02-1ec4a2b6a56c-kube-api-access-h59wd\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.093839 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-config\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.093870 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-scripts\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.093931 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztch8\" (UniqueName: \"kubernetes.io/projected/7e6e1e12-0db9-4959-a685-bed3c5382fa8-kube-api-access-ztch8\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.093974 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b604f471-77da-415e-ab02-1ec4a2b6a56c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.094004 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.094080 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-ovsdbserver-nb\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.094119 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-config-data\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.105570 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b604f471-77da-415e-ab02-1ec4a2b6a56c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.106799 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-config-data\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.116133 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.119059 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-scripts\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.128510 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.133770 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h59wd\" (UniqueName: \"kubernetes.io/projected/b604f471-77da-415e-ab02-1ec4a2b6a56c-kube-api-access-h59wd\") pod \"cinder-scheduler-0\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.159229 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.161177 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.168934 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.176558 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.212709 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-ovsdbserver-sb\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.212773 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-dns-svc\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.212795 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-dns-swift-storage-0\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.212841 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-config\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.212877 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztch8\" (UniqueName: \"kubernetes.io/projected/7e6e1e12-0db9-4959-a685-bed3c5382fa8-kube-api-access-ztch8\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.212935 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-ovsdbserver-nb\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.213796 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-ovsdbserver-nb\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.214350 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-ovsdbserver-sb\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.215892 4793 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-dns-svc\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.216405 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-dns-swift-storage-0\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.217054 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-config\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.282350 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5875f8f849-hlxzn"] Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.284273 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.288213 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.288537 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.298952 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztch8\" (UniqueName: \"kubernetes.io/projected/7e6e1e12-0db9-4959-a685-bed3c5382fa8-kube-api-access-ztch8\") pod \"dnsmasq-dns-57459cb9f9-4xdq5\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.315018 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5875f8f849-hlxzn"] Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.315867 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-54fb8bbf88-42dqw" podUID="39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.158:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.158:8443: connect: connection refused" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.322764 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-config-data\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.322858 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/245a06eb-695c-4aca-ae98-93e59ca3ee86-etc-machine-id\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.322885 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-internal-tls-certs\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.322932 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.322954 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-config-data-custom\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.322977 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-combined-ca-bundle\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.322994 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-public-tls-certs\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.323014 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-config\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.323098 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-httpd-config\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.323151 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2248k\" (UniqueName: \"kubernetes.io/projected/245a06eb-695c-4aca-ae98-93e59ca3ee86-kube-api-access-2248k\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.323179 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-ovndb-tls-certs\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.323208 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-scripts\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.323226 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/245a06eb-695c-4aca-ae98-93e59ca3ee86-logs\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.323278 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gw8h\" (UniqueName: \"kubernetes.io/projected/0510c44d-95e6-4986-a108-87c160fac699-kube-api-access-7gw8h\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.650238 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.658853 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.660070 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-config-data\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.660129 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/245a06eb-695c-4aca-ae98-93e59ca3ee86-etc-machine-id\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.660159 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-internal-tls-certs\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.660219 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.660242 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-config-data-custom\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.660273 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-combined-ca-bundle\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: 
I0127 20:27:19.660294 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-public-tls-certs\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.660316 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-config\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.660372 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-httpd-config\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.660408 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2248k\" (UniqueName: \"kubernetes.io/projected/245a06eb-695c-4aca-ae98-93e59ca3ee86-kube-api-access-2248k\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.660437 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-ovndb-tls-certs\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.660469 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-scripts\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.660489 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/245a06eb-695c-4aca-ae98-93e59ca3ee86-logs\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.660569 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gw8h\" (UniqueName: \"kubernetes.io/projected/0510c44d-95e6-4986-a108-87c160fac699-kube-api-access-7gw8h\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.664284 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/245a06eb-695c-4aca-ae98-93e59ca3ee86-logs\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.665180 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/245a06eb-695c-4aca-ae98-93e59ca3ee86-etc-machine-id\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " 
pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.684266 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-config-data-custom\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.697065 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-combined-ca-bundle\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.697301 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2248k\" (UniqueName: \"kubernetes.io/projected/245a06eb-695c-4aca-ae98-93e59ca3ee86-kube-api-access-2248k\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.706281 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-config\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.707140 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.707561 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-scripts\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.707909 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-public-tls-certs\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.708343 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-config-data\") pod \"cinder-api-0\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " pod="openstack/cinder-api-0" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.708503 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-ovndb-tls-certs\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.707864 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gw8h\" (UniqueName: \"kubernetes.io/projected/0510c44d-95e6-4986-a108-87c160fac699-kube-api-access-7gw8h\") pod \"neutron-5875f8f849-hlxzn\" (UID: 
\"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.709061 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-httpd-config\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.715305 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0510c44d-95e6-4986-a108-87c160fac699-internal-tls-certs\") pod \"neutron-5875f8f849-hlxzn\" (UID: \"0510c44d-95e6-4986-a108-87c160fac699\") " pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.745959 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-74bfddb9f7-8qtb8" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.817302 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccb3c627-2c29-46e6-91ab-083d3edc3eb6" path="/var/lib/kubelet/pods/ccb3c627-2c29-46e6-91ab-083d3edc3eb6/volumes" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.861156 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:19 crc kubenswrapper[4793]: I0127 20:27:19.925332 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 27 20:27:20 crc kubenswrapper[4793]: I0127 20:27:20.807500 4793 scope.go:117] "RemoveContainer" containerID="3135f6d30b9f8d64a130dfb58628947f2f7650a5a0eba624b0c1e61e689b4144" Jan 27 20:27:20 crc kubenswrapper[4793]: E0127 20:27:20.808277 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:27:20 crc kubenswrapper[4793]: I0127 20:27:20.850745 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-64fb588b47-pjswh" podUID="ff279e02-ebb8-4000-8a22-309165534f9c" containerName="dnsmasq-dns" containerID="cri-o://cbb595389c24c858d98b4912a9098004e3a311a506125acb92a1618fa34f3db6" gracePeriod=10 Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.259063 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57459cb9f9-4xdq5"] Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.305572 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.317797 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.319517 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.321631 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-2wxc9" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.322340 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.324376 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 27 20:27:21 crc kubenswrapper[4793]: W0127 20:27:21.332793 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb604f471_77da_415e_ab02_1ec4a2b6a56c.slice/crio-c84b25fad0758bf75c04f384414eb518fe74fce0de3e8d1d66585b141b204758 WatchSource:0}: Error finding container c84b25fad0758bf75c04f384414eb518fe74fce0de3e8d1d66585b141b204758: Status 404 returned error can't find the container with id c84b25fad0758bf75c04f384414eb518fe74fce0de3e8d1d66585b141b204758 Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.337718 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.526939 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e65fb7be-afef-4a68-b5a9-e772125ee668-openstack-config\") pod \"openstackclient\" (UID: \"e65fb7be-afef-4a68-b5a9-e772125ee668\") " pod="openstack/openstackclient" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.527001 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e65fb7be-afef-4a68-b5a9-e772125ee668-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e65fb7be-afef-4a68-b5a9-e772125ee668\") " pod="openstack/openstackclient" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.527062 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kw5bf\" (UniqueName: \"kubernetes.io/projected/e65fb7be-afef-4a68-b5a9-e772125ee668-kube-api-access-kw5bf\") pod \"openstackclient\" (UID: \"e65fb7be-afef-4a68-b5a9-e772125ee668\") " pod="openstack/openstackclient" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.527120 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e65fb7be-afef-4a68-b5a9-e772125ee668-openstack-config-secret\") pod \"openstackclient\" (UID: \"e65fb7be-afef-4a68-b5a9-e772125ee668\") " pod="openstack/openstackclient" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.645469 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e65fb7be-afef-4a68-b5a9-e772125ee668-openstack-config\") pod \"openstackclient\" (UID: \"e65fb7be-afef-4a68-b5a9-e772125ee668\") " pod="openstack/openstackclient" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.645513 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e65fb7be-afef-4a68-b5a9-e772125ee668-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e65fb7be-afef-4a68-b5a9-e772125ee668\") " 
pod="openstack/openstackclient" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.645583 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kw5bf\" (UniqueName: \"kubernetes.io/projected/e65fb7be-afef-4a68-b5a9-e772125ee668-kube-api-access-kw5bf\") pod \"openstackclient\" (UID: \"e65fb7be-afef-4a68-b5a9-e772125ee668\") " pod="openstack/openstackclient" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.645624 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e65fb7be-afef-4a68-b5a9-e772125ee668-openstack-config-secret\") pod \"openstackclient\" (UID: \"e65fb7be-afef-4a68-b5a9-e772125ee668\") " pod="openstack/openstackclient" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.647235 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e65fb7be-afef-4a68-b5a9-e772125ee668-openstack-config\") pod \"openstackclient\" (UID: \"e65fb7be-afef-4a68-b5a9-e772125ee668\") " pod="openstack/openstackclient" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.647848 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.659926 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e65fb7be-afef-4a68-b5a9-e772125ee668-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e65fb7be-afef-4a68-b5a9-e772125ee668\") " pod="openstack/openstackclient" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.660449 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e65fb7be-afef-4a68-b5a9-e772125ee668-openstack-config-secret\") pod \"openstackclient\" (UID: \"e65fb7be-afef-4a68-b5a9-e772125ee668\") " pod="openstack/openstackclient" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.687991 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kw5bf\" (UniqueName: \"kubernetes.io/projected/e65fb7be-afef-4a68-b5a9-e772125ee668-kube-api-access-kw5bf\") pod \"openstackclient\" (UID: \"e65fb7be-afef-4a68-b5a9-e772125ee668\") " pod="openstack/openstackclient" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.706849 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5875f8f849-hlxzn"] Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.767014 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.870479 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" event={"ID":"3d9caabf-4fb7-4374-966f-27ca72ed8ad3","Type":"ContainerStarted","Data":"608cf38b439eefda25b200aae8d815850ba423dfcf2d4057a7bc594107a3162a"} Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.870862 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.873429 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" event={"ID":"7e6e1e12-0db9-4959-a685-bed3c5382fa8","Type":"ContainerStarted","Data":"e04cfba649278389579f720a7c6da9d15dceb178f1ddac31750f0dd790cb949c"} Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.875657 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"245a06eb-695c-4aca-ae98-93e59ca3ee86","Type":"ContainerStarted","Data":"047dd9c9bdcb31aa56115831b6b590bcdb8046a7ea5b7aa26ab4555877472b7f"} Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.880951 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" event={"ID":"6139a6f4-f2b8-48f3-8997-e560f4deb75f","Type":"ContainerStarted","Data":"f2b68cb6124bc39ba184b01d8406fd5829b41d8a4697b302e8c33db525e8874f"} Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.888508 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b604f471-77da-415e-ab02-1ec4a2b6a56c","Type":"ContainerStarted","Data":"c84b25fad0758bf75c04f384414eb518fe74fce0de3e8d1d66585b141b204758"} Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.905413 4793 generic.go:334] "Generic (PLEG): container finished" podID="ff279e02-ebb8-4000-8a22-309165534f9c" containerID="cbb595389c24c858d98b4912a9098004e3a311a506125acb92a1618fa34f3db6" exitCode=0 Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.905635 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64fb588b47-pjswh" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.905717 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64fb588b47-pjswh" event={"ID":"ff279e02-ebb8-4000-8a22-309165534f9c","Type":"ContainerDied","Data":"cbb595389c24c858d98b4912a9098004e3a311a506125acb92a1618fa34f3db6"} Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.905798 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64fb588b47-pjswh" event={"ID":"ff279e02-ebb8-4000-8a22-309165534f9c","Type":"ContainerDied","Data":"20af7512b6f77fc6262d53bc913c2c738bf428da9549bd8648a1f952232567fb"} Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.905827 4793 scope.go:117] "RemoveContainer" containerID="cbb595389c24c858d98b4912a9098004e3a311a506125acb92a1618fa34f3db6" Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.916796 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5875f8f849-hlxzn" event={"ID":"0510c44d-95e6-4986-a108-87c160fac699","Type":"ContainerStarted","Data":"5b31efdec7229b0e46152c3e5294eed8e533e80e36a48cf19b73f3011691e762"} Jan 27 20:27:21 crc kubenswrapper[4793]: I0127 20:27:21.987762 4793 scope.go:117] "RemoveContainer" containerID="690880a6c076b73a88d601ceb6f5272c4d0fc83a859281af9f4411a81926f20a" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.048661 4793 scope.go:117] "RemoveContainer" containerID="cbb595389c24c858d98b4912a9098004e3a311a506125acb92a1618fa34f3db6" Jan 27 20:27:22 crc kubenswrapper[4793]: E0127 20:27:22.049149 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cbb595389c24c858d98b4912a9098004e3a311a506125acb92a1618fa34f3db6\": container with ID starting with 
cbb595389c24c858d98b4912a9098004e3a311a506125acb92a1618fa34f3db6 not found: ID does not exist" containerID="cbb595389c24c858d98b4912a9098004e3a311a506125acb92a1618fa34f3db6" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.049199 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cbb595389c24c858d98b4912a9098004e3a311a506125acb92a1618fa34f3db6"} err="failed to get container status \"cbb595389c24c858d98b4912a9098004e3a311a506125acb92a1618fa34f3db6\": rpc error: code = NotFound desc = could not find container \"cbb595389c24c858d98b4912a9098004e3a311a506125acb92a1618fa34f3db6\": container with ID starting with cbb595389c24c858d98b4912a9098004e3a311a506125acb92a1618fa34f3db6 not found: ID does not exist" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.049236 4793 scope.go:117] "RemoveContainer" containerID="690880a6c076b73a88d601ceb6f5272c4d0fc83a859281af9f4411a81926f20a" Jan 27 20:27:22 crc kubenswrapper[4793]: E0127 20:27:22.049767 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"690880a6c076b73a88d601ceb6f5272c4d0fc83a859281af9f4411a81926f20a\": container with ID starting with 690880a6c076b73a88d601ceb6f5272c4d0fc83a859281af9f4411a81926f20a not found: ID does not exist" containerID="690880a6c076b73a88d601ceb6f5272c4d0fc83a859281af9f4411a81926f20a" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.049811 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"690880a6c076b73a88d601ceb6f5272c4d0fc83a859281af9f4411a81926f20a"} err="failed to get container status \"690880a6c076b73a88d601ceb6f5272c4d0fc83a859281af9f4411a81926f20a\": rpc error: code = NotFound desc = could not find container \"690880a6c076b73a88d601ceb6f5272c4d0fc83a859281af9f4411a81926f20a\": container with ID starting with 690880a6c076b73a88d601ceb6f5272c4d0fc83a859281af9f4411a81926f20a not found: ID does not exist" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.060277 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-config\") pod \"ff279e02-ebb8-4000-8a22-309165534f9c\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.060326 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-ovsdbserver-sb\") pod \"ff279e02-ebb8-4000-8a22-309165534f9c\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.060346 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-dns-svc\") pod \"ff279e02-ebb8-4000-8a22-309165534f9c\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.060492 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-dns-swift-storage-0\") pod \"ff279e02-ebb8-4000-8a22-309165534f9c\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.060579 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-fqvxm\" (UniqueName: \"kubernetes.io/projected/ff279e02-ebb8-4000-8a22-309165534f9c-kube-api-access-fqvxm\") pod \"ff279e02-ebb8-4000-8a22-309165534f9c\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.060617 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-ovsdbserver-nb\") pod \"ff279e02-ebb8-4000-8a22-309165534f9c\" (UID: \"ff279e02-ebb8-4000-8a22-309165534f9c\") " Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.079818 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff279e02-ebb8-4000-8a22-309165534f9c-kube-api-access-fqvxm" (OuterVolumeSpecName: "kube-api-access-fqvxm") pod "ff279e02-ebb8-4000-8a22-309165534f9c" (UID: "ff279e02-ebb8-4000-8a22-309165534f9c"). InnerVolumeSpecName "kube-api-access-fqvxm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.184566 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqvxm\" (UniqueName: \"kubernetes.io/projected/ff279e02-ebb8-4000-8a22-309165534f9c-kube-api-access-fqvxm\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.239850 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ff279e02-ebb8-4000-8a22-309165534f9c" (UID: "ff279e02-ebb8-4000-8a22-309165534f9c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.242057 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ff279e02-ebb8-4000-8a22-309165534f9c" (UID: "ff279e02-ebb8-4000-8a22-309165534f9c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.244100 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ff279e02-ebb8-4000-8a22-309165534f9c" (UID: "ff279e02-ebb8-4000-8a22-309165534f9c"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.325492 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.325579 4793 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.325603 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.458039 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-config" (OuterVolumeSpecName: "config") pod "ff279e02-ebb8-4000-8a22-309165534f9c" (UID: "ff279e02-ebb8-4000-8a22-309165534f9c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.466890 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ff279e02-ebb8-4000-8a22-309165534f9c" (UID: "ff279e02-ebb8-4000-8a22-309165534f9c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.531304 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.531342 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff279e02-ebb8-4000-8a22-309165534f9c-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.732976 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.756653 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.756709 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.756759 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.757691 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"fdd2916150f8a42b633c00852950908ff71fd3b561d65e72206d0f902e9390f6"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.757782 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://fdd2916150f8a42b633c00852950908ff71fd3b561d65e72206d0f902e9390f6" gracePeriod=600 Jan 27 20:27:22 crc kubenswrapper[4793]: W0127 20:27:22.925914 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode65fb7be_afef_4a68_b5a9_e772125ee668.slice/crio-af716057cafebc4e44e4ca1ad2dc249731c162a6728bcf4aa0588ab6c77c8235 WatchSource:0}: Error finding container af716057cafebc4e44e4ca1ad2dc249731c162a6728bcf4aa0588ab6c77c8235: Status 404 returned error can't find the container with id af716057cafebc4e44e4ca1ad2dc249731c162a6728bcf4aa0588ab6c77c8235 Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.965747 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64fb588b47-pjswh"] Jan 27 20:27:22 crc kubenswrapper[4793]: I0127 20:27:22.979454 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-64fb588b47-pjswh"] Jan 27 20:27:23 crc kubenswrapper[4793]: I0127 20:27:23.002095 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="fdd2916150f8a42b633c00852950908ff71fd3b561d65e72206d0f902e9390f6" exitCode=0 Jan 27 20:27:23 crc kubenswrapper[4793]: I0127 20:27:23.002183 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"fdd2916150f8a42b633c00852950908ff71fd3b561d65e72206d0f902e9390f6"} Jan 27 20:27:23 crc kubenswrapper[4793]: I0127 20:27:23.002218 4793 scope.go:117] "RemoveContainer" containerID="c284439d655dda73540e21780ff150cab3f56b6703697582fed54f934aaea296" Jan 27 20:27:23 crc kubenswrapper[4793]: I0127 20:27:23.014120 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" event={"ID":"3d9caabf-4fb7-4374-966f-27ca72ed8ad3","Type":"ContainerStarted","Data":"9b39be4460f82b7ef545762bd3facfc2d9c5b4cd6d10ba41188312aa24e08a00"} Jan 27 20:27:23 crc kubenswrapper[4793]: I0127 20:27:23.027177 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 27 20:27:23 crc kubenswrapper[4793]: I0127 20:27:23.048916 4793 generic.go:334] "Generic (PLEG): container finished" podID="7e6e1e12-0db9-4959-a685-bed3c5382fa8" containerID="d9b74e9fb3bb54a7f5cfe46754f70a091508a9869c1c54a5bea363864b268b2c" exitCode=0 Jan 27 20:27:23 crc kubenswrapper[4793]: I0127 20:27:23.049848 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" event={"ID":"7e6e1e12-0db9-4959-a685-bed3c5382fa8","Type":"ContainerDied","Data":"d9b74e9fb3bb54a7f5cfe46754f70a091508a9869c1c54a5bea363864b268b2c"} Jan 27 20:27:23 crc kubenswrapper[4793]: I0127 20:27:23.057911 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-547c9dc95d-r6k22" 
podStartSLOduration=5.513616347 podStartE2EDuration="12.057885095s" podCreationTimestamp="2026-01-27 20:27:11 +0000 UTC" firstStartedPulling="2026-01-27 20:27:13.84579156 +0000 UTC m=+1459.236044716" lastFinishedPulling="2026-01-27 20:27:20.390060308 +0000 UTC m=+1465.780313464" observedRunningTime="2026-01-27 20:27:23.046220309 +0000 UTC m=+1468.436473455" watchObservedRunningTime="2026-01-27 20:27:23.057885095 +0000 UTC m=+1468.448138251" Jan 27 20:27:23 crc kubenswrapper[4793]: I0127 20:27:23.066852 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" event={"ID":"6139a6f4-f2b8-48f3-8997-e560f4deb75f","Type":"ContainerStarted","Data":"89aa61bbf54e28eda753c882553d1b7470bfa071524662eb29212226c300c2bf"} Jan 27 20:27:23 crc kubenswrapper[4793]: I0127 20:27:23.071865 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5875f8f849-hlxzn" event={"ID":"0510c44d-95e6-4986-a108-87c160fac699","Type":"ContainerStarted","Data":"a8de86db5066e0ccc7dba91beb66fb46dc271217b449af8780c0247b8f8dd95f"} Jan 27 20:27:23 crc kubenswrapper[4793]: I0127 20:27:23.116884 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-6f6f4d8b5c-97rd8" podStartSLOduration=5.584352746 podStartE2EDuration="12.116856174s" podCreationTimestamp="2026-01-27 20:27:11 +0000 UTC" firstStartedPulling="2026-01-27 20:27:13.845850212 +0000 UTC m=+1459.236103378" lastFinishedPulling="2026-01-27 20:27:20.37835365 +0000 UTC m=+1465.768606806" observedRunningTime="2026-01-27 20:27:23.11096062 +0000 UTC m=+1468.501213776" watchObservedRunningTime="2026-01-27 20:27:23.116856174 +0000 UTC m=+1468.507109330" Jan 27 20:27:23 crc kubenswrapper[4793]: I0127 20:27:23.806377 4793 scope.go:117] "RemoveContainer" containerID="f7a8200b8c77c8e31ca3cc1869f57c5e8b46ff748a1f699d8e2f166aad3ad398" Jan 27 20:27:23 crc kubenswrapper[4793]: I0127 20:27:23.866065 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff279e02-ebb8-4000-8a22-309165534f9c" path="/var/lib/kubelet/pods/ff279e02-ebb8-4000-8a22-309165534f9c/volumes" Jan 27 20:27:24 crc kubenswrapper[4793]: I0127 20:27:24.118629 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5875f8f849-hlxzn" event={"ID":"0510c44d-95e6-4986-a108-87c160fac699","Type":"ContainerStarted","Data":"55895ee14019fe377466ed619338e0155792a93c9e96bb54e7185e39e6e095a7"} Jan 27 20:27:24 crc kubenswrapper[4793]: I0127 20:27:24.118985 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:24 crc kubenswrapper[4793]: I0127 20:27:24.143036 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"} Jan 27 20:27:24 crc kubenswrapper[4793]: I0127 20:27:24.150787 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"e65fb7be-afef-4a68-b5a9-e772125ee668","Type":"ContainerStarted","Data":"af716057cafebc4e44e4ca1ad2dc249731c162a6728bcf4aa0588ab6c77c8235"} Jan 27 20:27:24 crc kubenswrapper[4793]: I0127 20:27:24.160011 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5875f8f849-hlxzn" podStartSLOduration=5.159995201 podStartE2EDuration="5.159995201s" podCreationTimestamp="2026-01-27 20:27:19 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:27:24.159494349 +0000 UTC m=+1469.549747505" watchObservedRunningTime="2026-01-27 20:27:24.159995201 +0000 UTC m=+1469.550248357" Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.274834 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" event={"ID":"7e6e1e12-0db9-4959-a685-bed3c5382fa8","Type":"ContainerStarted","Data":"178931f45dcf21bed8aecb4d90a62095e8c51d7087de094e451fcfcdb1aa718f"} Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.275788 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.300114 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"ffc46614-5f6d-40ad-a388-1ff326d22ee6","Type":"ContainerStarted","Data":"8fc61af3848de745688268804290928f223177c35ea49f4f7841a974d950a45b"} Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.303749 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" podStartSLOduration=7.303722941 podStartE2EDuration="7.303722941s" podCreationTimestamp="2026-01-27 20:27:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:27:25.298065172 +0000 UTC m=+1470.688318338" watchObservedRunningTime="2026-01-27 20:27:25.303722941 +0000 UTC m=+1470.693976097" Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.333618 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"245a06eb-695c-4aca-ae98-93e59ca3ee86","Type":"ContainerStarted","Data":"b47f000ec28914b3c309b283c56a015d67e34250197653eb6e74fb47093b2dcb"} Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.373627 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b604f471-77da-415e-ab02-1ec4a2b6a56c","Type":"ContainerStarted","Data":"362d97b2baa46d3e5642cc3506b1263d186bbc9221909a73d25cc3561cd80b98"} Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.690308 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7b5cf4cb74-b6v79"] Jan 27 20:27:25 crc kubenswrapper[4793]: E0127 20:27:25.691459 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff279e02-ebb8-4000-8a22-309165534f9c" containerName="init" Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.691518 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff279e02-ebb8-4000-8a22-309165534f9c" containerName="init" Jan 27 20:27:25 crc kubenswrapper[4793]: E0127 20:27:25.691559 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff279e02-ebb8-4000-8a22-309165534f9c" containerName="dnsmasq-dns" Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.691567 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff279e02-ebb8-4000-8a22-309165534f9c" containerName="dnsmasq-dns" Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.691903 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff279e02-ebb8-4000-8a22-309165534f9c" containerName="dnsmasq-dns" Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.693008 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.696103 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.696254 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.710424 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7b5cf4cb74-b6v79"] Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.880257 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15d23321-0811-4752-b014-9a4f08ceac3f-config-data\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.880362 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfm9t\" (UniqueName: \"kubernetes.io/projected/15d23321-0811-4752-b014-9a4f08ceac3f-kube-api-access-lfm9t\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.880428 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/15d23321-0811-4752-b014-9a4f08ceac3f-public-tls-certs\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.880524 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15d23321-0811-4752-b014-9a4f08ceac3f-combined-ca-bundle\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.880610 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/15d23321-0811-4752-b014-9a4f08ceac3f-logs\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.880677 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/15d23321-0811-4752-b014-9a4f08ceac3f-config-data-custom\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:25 crc kubenswrapper[4793]: I0127 20:27:25.880716 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/15d23321-0811-4752-b014-9a4f08ceac3f-internal-tls-certs\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:26 crc kubenswrapper[4793]: I0127 20:27:26.122090 4793 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15d23321-0811-4752-b014-9a4f08ceac3f-combined-ca-bundle\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:26 crc kubenswrapper[4793]: I0127 20:27:26.122140 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/15d23321-0811-4752-b014-9a4f08ceac3f-logs\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:26 crc kubenswrapper[4793]: I0127 20:27:26.122179 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/15d23321-0811-4752-b014-9a4f08ceac3f-config-data-custom\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:26 crc kubenswrapper[4793]: I0127 20:27:26.122215 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/15d23321-0811-4752-b014-9a4f08ceac3f-internal-tls-certs\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:26 crc kubenswrapper[4793]: I0127 20:27:26.122352 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15d23321-0811-4752-b014-9a4f08ceac3f-config-data\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:26 crc kubenswrapper[4793]: I0127 20:27:26.122376 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfm9t\" (UniqueName: \"kubernetes.io/projected/15d23321-0811-4752-b014-9a4f08ceac3f-kube-api-access-lfm9t\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:26 crc kubenswrapper[4793]: I0127 20:27:26.122415 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/15d23321-0811-4752-b014-9a4f08ceac3f-public-tls-certs\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:26 crc kubenswrapper[4793]: I0127 20:27:26.122682 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/15d23321-0811-4752-b014-9a4f08ceac3f-logs\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:26 crc kubenswrapper[4793]: I0127 20:27:26.140331 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/15d23321-0811-4752-b014-9a4f08ceac3f-internal-tls-certs\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:26 crc kubenswrapper[4793]: I0127 20:27:26.159783 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" 
(UniqueName: \"kubernetes.io/secret/15d23321-0811-4752-b014-9a4f08ceac3f-config-data-custom\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:26 crc kubenswrapper[4793]: I0127 20:27:26.249332 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15d23321-0811-4752-b014-9a4f08ceac3f-combined-ca-bundle\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:26 crc kubenswrapper[4793]: I0127 20:27:26.251687 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15d23321-0811-4752-b014-9a4f08ceac3f-config-data\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:26 crc kubenswrapper[4793]: I0127 20:27:26.271173 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/15d23321-0811-4752-b014-9a4f08ceac3f-public-tls-certs\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:26 crc kubenswrapper[4793]: I0127 20:27:26.310910 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfm9t\" (UniqueName: \"kubernetes.io/projected/15d23321-0811-4752-b014-9a4f08ceac3f-kube-api-access-lfm9t\") pod \"barbican-api-7b5cf4cb74-b6v79\" (UID: \"15d23321-0811-4752-b014-9a4f08ceac3f\") " pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:26 crc kubenswrapper[4793]: I0127 20:27:26.373145 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:27 crc kubenswrapper[4793]: I0127 20:27:27.171423 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7b5cf4cb74-b6v79"] Jan 27 20:27:27 crc kubenswrapper[4793]: I0127 20:27:27.225499 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-656c4f975d-cgft5" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.176:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:27:27 crc kubenswrapper[4793]: I0127 20:27:27.227614 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-656c4f975d-cgft5" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.176:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:27:27 crc kubenswrapper[4793]: I0127 20:27:27.517906 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b604f471-77da-415e-ab02-1ec4a2b6a56c","Type":"ContainerStarted","Data":"1094f406188e6f61e12a913e80ca06ec3e8f8b46f186e41e59f7526802253ff9"} Jan 27 20:27:27 crc kubenswrapper[4793]: I0127 20:27:27.539691 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7b5cf4cb74-b6v79" event={"ID":"15d23321-0811-4752-b014-9a4f08ceac3f","Type":"ContainerStarted","Data":"bbb834be3bdbbc48af6f0247b083ecfc893f0ff88257600ddee3d6598ea808bd"} Jan 27 20:27:27 crc kubenswrapper[4793]: I0127 20:27:27.559785 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=8.966539668 podStartE2EDuration="9.559764087s" podCreationTimestamp="2026-01-27 20:27:18 +0000 UTC" firstStartedPulling="2026-01-27 20:27:21.350596246 +0000 UTC m=+1466.740849402" lastFinishedPulling="2026-01-27 20:27:21.943820665 +0000 UTC m=+1467.334073821" observedRunningTime="2026-01-27 20:27:27.556124048 +0000 UTC m=+1472.946377214" watchObservedRunningTime="2026-01-27 20:27:27.559764087 +0000 UTC m=+1472.950017243" Jan 27 20:27:27 crc kubenswrapper[4793]: I0127 20:27:27.862814 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-656c4f975d-cgft5" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.176:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:27:27 crc kubenswrapper[4793]: I0127 20:27:27.863541 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-656c4f975d-cgft5" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.176:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:27:28 crc kubenswrapper[4793]: I0127 20:27:28.289828 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 27 20:27:28 crc kubenswrapper[4793]: I0127 20:27:28.292497 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Jan 27 20:27:28 crc kubenswrapper[4793]: I0127 20:27:28.466295 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openstack/watcher-decision-engine-0" Jan 27 20:27:28 crc kubenswrapper[4793]: I0127 20:27:28.763580 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7b5cf4cb74-b6v79" event={"ID":"15d23321-0811-4752-b014-9a4f08ceac3f","Type":"ContainerStarted","Data":"a5e4c24b475d68db34e4db0d6df6847ef4e50203909e8414fd91a3f509680cf3"} Jan 27 20:27:28 crc kubenswrapper[4793]: I0127 20:27:28.787449 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="245a06eb-695c-4aca-ae98-93e59ca3ee86" containerName="cinder-api-log" containerID="cri-o://b47f000ec28914b3c309b283c56a015d67e34250197653eb6e74fb47093b2dcb" gracePeriod=30 Jan 27 20:27:28 crc kubenswrapper[4793]: I0127 20:27:28.787605 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"245a06eb-695c-4aca-ae98-93e59ca3ee86","Type":"ContainerStarted","Data":"670aa5e8d5be809bc290d7d8891e969f16e7c66d8312bb071336101b87296141"} Jan 27 20:27:28 crc kubenswrapper[4793]: I0127 20:27:28.788216 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 27 20:27:28 crc kubenswrapper[4793]: I0127 20:27:28.788649 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="245a06eb-695c-4aca-ae98-93e59ca3ee86" containerName="cinder-api" containerID="cri-o://670aa5e8d5be809bc290d7d8891e969f16e7c66d8312bb071336101b87296141" gracePeriod=30 Jan 27 20:27:28 crc kubenswrapper[4793]: I0127 20:27:28.835671 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=9.835646844 podStartE2EDuration="9.835646844s" podCreationTimestamp="2026-01-27 20:27:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:27:28.823618599 +0000 UTC m=+1474.213871755" watchObservedRunningTime="2026-01-27 20:27:28.835646844 +0000 UTC m=+1474.225900000" Jan 27 20:27:28 crc kubenswrapper[4793]: I0127 20:27:28.866767 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Jan 27 20:27:29 crc kubenswrapper[4793]: I0127 20:27:29.838011 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-54fb8bbf88-42dqw" podUID="39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.158:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.158:8443: connect: connection refused" Jan 27 20:27:29 crc kubenswrapper[4793]: I0127 20:27:29.838588 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="b604f471-77da-415e-ab02-1ec4a2b6a56c" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.179:8080/\": dial tcp 10.217.0.179:8080: connect: connection refused" Jan 27 20:27:29 crc kubenswrapper[4793]: I0127 20:27:29.860010 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 27 20:27:29 crc kubenswrapper[4793]: I0127 20:27:29.860343 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:27:29 crc kubenswrapper[4793]: I0127 20:27:29.860468 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-54fb8bbf88-42dqw" Jan 27 20:27:29 crc kubenswrapper[4793]: 
I0127 20:27:29.911960 4793 generic.go:334] "Generic (PLEG): container finished" podID="245a06eb-695c-4aca-ae98-93e59ca3ee86" containerID="b47f000ec28914b3c309b283c56a015d67e34250197653eb6e74fb47093b2dcb" exitCode=143 Jan 27 20:27:29 crc kubenswrapper[4793]: I0127 20:27:29.912089 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"245a06eb-695c-4aca-ae98-93e59ca3ee86","Type":"ContainerDied","Data":"b47f000ec28914b3c309b283c56a015d67e34250197653eb6e74fb47093b2dcb"} Jan 27 20:27:29 crc kubenswrapper[4793]: I0127 20:27:29.936637 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7b5cf4cb74-b6v79" event={"ID":"15d23321-0811-4752-b014-9a4f08ceac3f","Type":"ContainerStarted","Data":"4fb3551493d77f9364000272b308b2b88cc2483aa0ce1b9adaf0bda10abc6e3b"} Jan 27 20:27:29 crc kubenswrapper[4793]: I0127 20:27:29.937059 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:29 crc kubenswrapper[4793]: I0127 20:27:29.937236 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:30 crc kubenswrapper[4793]: I0127 20:27:30.362874 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7b5cf4cb74-b6v79" podStartSLOduration=5.362849218 podStartE2EDuration="5.362849218s" podCreationTimestamp="2026-01-27 20:27:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:27:30.3401046 +0000 UTC m=+1475.730357756" watchObservedRunningTime="2026-01-27 20:27:30.362849218 +0000 UTC m=+1475.753102374" Jan 27 20:27:30 crc kubenswrapper[4793]: I0127 20:27:30.369170 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-69d66b6dcf-bd258"] Jan 27 20:27:30 crc kubenswrapper[4793]: I0127 20:27:30.369498 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" podUID="3ffb9b65-beb8-4455-92cf-6f275b4f4946" containerName="dnsmasq-dns" containerID="cri-o://fbd5473a6e6a4b3624f9a6b7f32d0b0a62d9a22bf641d31377cb499b7de0fa9c" gracePeriod=10 Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.249126 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.301207 4793 generic.go:334] "Generic (PLEG): container finished" podID="3ffb9b65-beb8-4455-92cf-6f275b4f4946" containerID="fbd5473a6e6a4b3624f9a6b7f32d0b0a62d9a22bf641d31377cb499b7de0fa9c" exitCode=0 Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.302733 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" event={"ID":"3ffb9b65-beb8-4455-92cf-6f275b4f4946","Type":"ContainerDied","Data":"fbd5473a6e6a4b3624f9a6b7f32d0b0a62d9a22bf641d31377cb499b7de0fa9c"} Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.487326 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.570631 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-dns-svc\") pod \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.570977 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-ovsdbserver-nb\") pod \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.571044 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-dns-swift-storage-0\") pod \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.571111 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnc5x\" (UniqueName: \"kubernetes.io/projected/3ffb9b65-beb8-4455-92cf-6f275b4f4946-kube-api-access-mnc5x\") pod \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.571154 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-config\") pod \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.571217 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-ovsdbserver-sb\") pod \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\" (UID: \"3ffb9b65-beb8-4455-92cf-6f275b4f4946\") " Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.604611 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ffb9b65-beb8-4455-92cf-6f275b4f4946-kube-api-access-mnc5x" (OuterVolumeSpecName: "kube-api-access-mnc5x") pod "3ffb9b65-beb8-4455-92cf-6f275b4f4946" (UID: "3ffb9b65-beb8-4455-92cf-6f275b4f4946"). InnerVolumeSpecName "kube-api-access-mnc5x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.644115 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3ffb9b65-beb8-4455-92cf-6f275b4f4946" (UID: "3ffb9b65-beb8-4455-92cf-6f275b4f4946"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.673241 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnc5x\" (UniqueName: \"kubernetes.io/projected/3ffb9b65-beb8-4455-92cf-6f275b4f4946-kube-api-access-mnc5x\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.673286 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.694122 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3ffb9b65-beb8-4455-92cf-6f275b4f4946" (UID: "3ffb9b65-beb8-4455-92cf-6f275b4f4946"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.736347 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-config" (OuterVolumeSpecName: "config") pod "3ffb9b65-beb8-4455-92cf-6f275b4f4946" (UID: "3ffb9b65-beb8-4455-92cf-6f275b4f4946"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:31 crc kubenswrapper[4793]: I0127 20:27:31.737107 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3ffb9b65-beb8-4455-92cf-6f275b4f4946" (UID: "3ffb9b65-beb8-4455-92cf-6f275b4f4946"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:32 crc kubenswrapper[4793]: I0127 20:27:32.148924 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:32 crc kubenswrapper[4793]: I0127 20:27:32.148953 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:32 crc kubenswrapper[4793]: I0127 20:27:32.148963 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:32 crc kubenswrapper[4793]: I0127 20:27:32.262292 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3ffb9b65-beb8-4455-92cf-6f275b4f4946" (UID: "3ffb9b65-beb8-4455-92cf-6f275b4f4946"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:32 crc kubenswrapper[4793]: I0127 20:27:32.278801 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-656c4f975d-cgft5" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.176:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:27:32 crc kubenswrapper[4793]: I0127 20:27:32.317873 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-656c4f975d-cgft5" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.176:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:27:32 crc kubenswrapper[4793]: I0127 20:27:32.329872 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" Jan 27 20:27:32 crc kubenswrapper[4793]: I0127 20:27:32.329916 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" event={"ID":"3ffb9b65-beb8-4455-92cf-6f275b4f4946","Type":"ContainerDied","Data":"c87214058edf38413d6ff30f3bb86763f2e4142171604e1b22d3f264bbc834f2"} Jan 27 20:27:32 crc kubenswrapper[4793]: I0127 20:27:32.329949 4793 scope.go:117] "RemoveContainer" containerID="fbd5473a6e6a4b3624f9a6b7f32d0b0a62d9a22bf641d31377cb499b7de0fa9c" Jan 27 20:27:32 crc kubenswrapper[4793]: I0127 20:27:32.356410 4793 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3ffb9b65-beb8-4455-92cf-6f275b4f4946-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:32 crc kubenswrapper[4793]: I0127 20:27:32.387397 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-69d66b6dcf-bd258"] Jan 27 20:27:32 crc kubenswrapper[4793]: I0127 20:27:32.394183 4793 scope.go:117] "RemoveContainer" containerID="c072cc6ae3de469d9fa68dd964737d4572baeb24ab7ae451767be04389804df1" Jan 27 20:27:32 crc kubenswrapper[4793]: I0127 20:27:32.401446 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-69d66b6dcf-bd258"] Jan 27 20:27:33 crc kubenswrapper[4793]: I0127 20:27:33.228804 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-656c4f975d-cgft5" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.176:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:27:33 crc kubenswrapper[4793]: I0127 20:27:33.229207 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-656c4f975d-cgft5" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.176:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:27:33 crc kubenswrapper[4793]: I0127 20:27:33.236467 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:33 crc kubenswrapper[4793]: I0127 20:27:33.243205 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:33 crc kubenswrapper[4793]: I0127 20:27:33.804135 4793 scope.go:117] 
"RemoveContainer" containerID="3135f6d30b9f8d64a130dfb58628947f2f7650a5a0eba624b0c1e61e689b4144" Jan 27 20:27:33 crc kubenswrapper[4793]: I0127 20:27:33.815654 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ffb9b65-beb8-4455-92cf-6f275b4f4946" path="/var/lib/kubelet/pods/3ffb9b65-beb8-4455-92cf-6f275b4f4946/volumes" Jan 27 20:27:35 crc kubenswrapper[4793]: I0127 20:27:35.421470 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"7fb7b446c25d1ff79e3de54a1a928227db9260a423622219b9cb7ae4758d64dc"} Jan 27 20:27:35 crc kubenswrapper[4793]: I0127 20:27:35.511219 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="b604f471-77da-415e-ab02-1ec4a2b6a56c" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 20:27:35 crc kubenswrapper[4793]: E0127 20:27:35.650205 4793 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39eb7d41_9778_4f8c_a5ab_beb83ee02c3a.slice/crio-c3f58fb8d65ad0d715698cf7349fbd870ec40a6b9c8106589ae11c0ed0c8328e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39eb7d41_9778_4f8c_a5ab_beb83ee02c3a.slice/crio-conmon-c3f58fb8d65ad0d715698cf7349fbd870ec40a6b9c8106589ae11c0ed0c8328e.scope\": RecentStats: unable to find data in memory cache]" Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.141166 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7b5cf4cb74-b6v79" Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.211496 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-69d66b6dcf-bd258" podUID="3ffb9b65-beb8-4455-92cf-6f275b4f4946" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.166:5353: i/o timeout" Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.433667 4793 generic.go:334] "Generic (PLEG): container finished" podID="39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" containerID="c3f58fb8d65ad0d715698cf7349fbd870ec40a6b9c8106589ae11c0ed0c8328e" exitCode=137 Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.433706 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-54fb8bbf88-42dqw" event={"ID":"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a","Type":"ContainerDied","Data":"c3f58fb8d65ad0d715698cf7349fbd870ec40a6b9c8106589ae11c0ed0c8328e"} Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.433736 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-54fb8bbf88-42dqw" event={"ID":"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a","Type":"ContainerDied","Data":"653a93051b6bbb036135bc48cf90dde05e2f6820135c1d0306e084e4592954a9"} Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.433746 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="653a93051b6bbb036135bc48cf90dde05e2f6820135c1d0306e084e4592954a9" Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.444072 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-54fb8bbf88-42dqw"
Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.603152 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-horizon-tls-certs\") pod \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") "
Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.604027 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-horizon-secret-key\") pod \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") "
Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.604059 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6sw2n\" (UniqueName: \"kubernetes.io/projected/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-kube-api-access-6sw2n\") pod \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") "
Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.604187 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-combined-ca-bundle\") pod \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") "
Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.604267 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-config-data\") pod \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") "
Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.604310 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-scripts\") pod \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") "
Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.604352 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-logs\") pod \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\" (UID: \"39eb7d41-9778-4f8c-a5ab-beb83ee02c3a\") "
Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.605705 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-logs" (OuterVolumeSpecName: "logs") pod "39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" (UID: "39eb7d41-9778-4f8c-a5ab-beb83ee02c3a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.635643 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-kube-api-access-6sw2n" (OuterVolumeSpecName: "kube-api-access-6sw2n") pod "39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" (UID: "39eb7d41-9778-4f8c-a5ab-beb83ee02c3a"). InnerVolumeSpecName "kube-api-access-6sw2n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.635776 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" (UID: "39eb7d41-9778-4f8c-a5ab-beb83ee02c3a"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.639567 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-config-data" (OuterVolumeSpecName: "config-data") pod "39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" (UID: "39eb7d41-9778-4f8c-a5ab-beb83ee02c3a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.650948 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-scripts" (OuterVolumeSpecName: "scripts") pod "39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" (UID: "39eb7d41-9778-4f8c-a5ab-beb83ee02c3a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:27:36 crc kubenswrapper[4793]: I0127 20:27:36.668779 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" (UID: "39eb7d41-9778-4f8c-a5ab-beb83ee02c3a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:27:37 crc kubenswrapper[4793]: I0127 20:27:37.022077 4793 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:37 crc kubenswrapper[4793]: I0127 20:27:37.022112 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6sw2n\" (UniqueName: \"kubernetes.io/projected/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-kube-api-access-6sw2n\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:37 crc kubenswrapper[4793]: I0127 20:27:37.022125 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:37 crc kubenswrapper[4793]: I0127 20:27:37.022134 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:37 crc kubenswrapper[4793]: I0127 20:27:37.022143 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-scripts\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:37 crc kubenswrapper[4793]: I0127 20:27:37.022151 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-logs\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:37 crc kubenswrapper[4793]: I0127 20:27:37.169727 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" (UID: "39eb7d41-9778-4f8c-a5ab-beb83ee02c3a"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:27:37 crc kubenswrapper[4793]: I0127 20:27:37.226623 4793 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a-horizon-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:37 crc kubenswrapper[4793]: I0127 20:27:37.443932 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-54fb8bbf88-42dqw"
Jan 27 20:27:37 crc kubenswrapper[4793]: I0127 20:27:37.487173 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-54fb8bbf88-42dqw"]
Jan 27 20:27:37 crc kubenswrapper[4793]: I0127 20:27:37.501181 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-54fb8bbf88-42dqw"]
Jan 27 20:27:38 crc kubenswrapper[4793]: I0127 20:27:38.134606 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" path="/var/lib/kubelet/pods/39eb7d41-9778-4f8c-a5ab-beb83ee02c3a/volumes"
Jan 27 20:27:38 crc kubenswrapper[4793]: I0127 20:27:38.267275 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 27 20:27:38 crc kubenswrapper[4793]: I0127 20:27:38.267361 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 20:27:38 crc kubenswrapper[4793]: I0127 20:27:38.279000 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0"
Jan 27 20:27:38 crc kubenswrapper[4793]: I0127 20:27:38.500050 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0"
Jan 27 20:27:39 crc kubenswrapper[4793]: I0127 20:27:39.486424 4793 generic.go:334] "Generic (PLEG): container finished" podID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerID="8fc61af3848de745688268804290928f223177c35ea49f4f7841a974d950a45b" exitCode=1
Jan 27 20:27:39 crc kubenswrapper[4793]: I0127 20:27:39.486515 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"ffc46614-5f6d-40ad-a388-1ff326d22ee6","Type":"ContainerDied","Data":"8fc61af3848de745688268804290928f223177c35ea49f4f7841a974d950a45b"}
Jan 27 20:27:39 crc kubenswrapper[4793]: I0127 20:27:39.486607 4793 scope.go:117] "RemoveContainer" containerID="f7a8200b8c77c8e31ca3cc1869f57c5e8b46ff748a1f699d8e2f166aad3ad398"
Jan 27 20:27:39 crc kubenswrapper[4793]: I0127 20:27:39.487628 4793 scope.go:117] "RemoveContainer" containerID="8fc61af3848de745688268804290928f223177c35ea49f4f7841a974d950a45b"
Jan 27 20:27:39 crc kubenswrapper[4793]: E0127 20:27:39.487922 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 40s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(ffc46614-5f6d-40ad-a388-1ff326d22ee6)\"" pod="openstack/watcher-decision-engine-0" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6"
Jan 27 20:27:39 crc kubenswrapper[4793]: I0127 20:27:39.948281 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.028859 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.111029 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.111646 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" containerName="ceilometer-central-agent" containerID="cri-o://8dc8b4f68261a58f0ab83795e3b834111a882ca5c8361ea0cb2370e508af6ae8" gracePeriod=30
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.111760 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" containerName="sg-core" containerID="cri-o://f9f61f96b9b70889d4ee24b1a5a03d616fbb327ee4e3093ca209f52c490c982c" gracePeriod=30
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.111719 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" containerName="proxy-httpd" containerID="cri-o://797e0c8a0dd5f82b7a4ffe184a06800d3820b7b892ca0745baa7ac10658793f5" gracePeriod=30
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.111769 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" containerName="ceilometer-notification-agent" containerID="cri-o://b059262a1921cdefdb4e62cf235283090a344a49000a4e4d219675fe55c8c8f5" gracePeriod=30
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.192964 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="245a06eb-695c-4aca-ae98-93e59ca3ee86" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.181:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.272851 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7b5cf4cb74-b6v79"
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.366171 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-656c4f975d-cgft5"]
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.366411 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-656c4f975d-cgft5" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api-log" containerID="cri-o://28d31860dcd7d9feb0eba4bb17d710a25b8d8dba4d4dadf99ea961f22ff176b1" gracePeriod=30
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.366590 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-656c4f975d-cgft5" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api" containerID="cri-o://5dcc822ece0ffdf34ee771e2fec72acdba64ad5fed784c7ea1c67126ba54dc5f" gracePeriod=30
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.524667 4793 generic.go:334] "Generic (PLEG): container finished" podID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerID="28d31860dcd7d9feb0eba4bb17d710a25b8d8dba4d4dadf99ea961f22ff176b1" exitCode=143
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.524744 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-656c4f975d-cgft5" event={"ID":"c29ca30f-5826-4ff4-a916-0a20b9ec887e","Type":"ContainerDied","Data":"28d31860dcd7d9feb0eba4bb17d710a25b8d8dba4d4dadf99ea961f22ff176b1"}
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.548159 4793 generic.go:334] "Generic (PLEG): container finished" podID="d5748384-5202-4d5e-9339-8a5cab24359d" containerID="797e0c8a0dd5f82b7a4ffe184a06800d3820b7b892ca0745baa7ac10658793f5" exitCode=0
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.548198 4793 generic.go:334] "Generic (PLEG): container finished" podID="d5748384-5202-4d5e-9339-8a5cab24359d" containerID="f9f61f96b9b70889d4ee24b1a5a03d616fbb327ee4e3093ca209f52c490c982c" exitCode=2
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.548421 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b604f471-77da-415e-ab02-1ec4a2b6a56c" containerName="cinder-scheduler" containerID="cri-o://362d97b2baa46d3e5642cc3506b1263d186bbc9221909a73d25cc3561cd80b98" gracePeriod=30
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.548890 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d5748384-5202-4d5e-9339-8a5cab24359d","Type":"ContainerDied","Data":"797e0c8a0dd5f82b7a4ffe184a06800d3820b7b892ca0745baa7ac10658793f5"}
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.548932 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d5748384-5202-4d5e-9339-8a5cab24359d","Type":"ContainerDied","Data":"f9f61f96b9b70889d4ee24b1a5a03d616fbb327ee4e3093ca209f52c490c982c"}
Jan 27 20:27:40 crc kubenswrapper[4793]: I0127 20:27:40.551123 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b604f471-77da-415e-ab02-1ec4a2b6a56c" containerName="probe" containerID="cri-o://1094f406188e6f61e12a913e80ca06ec3e8f8b46f186e41e59f7526802253ff9" gracePeriod=30
Jan 27 20:27:43 crc kubenswrapper[4793]: I0127 20:27:43.111952 4793 generic.go:334] "Generic (PLEG): container finished" podID="d5748384-5202-4d5e-9339-8a5cab24359d" containerID="b059262a1921cdefdb4e62cf235283090a344a49000a4e4d219675fe55c8c8f5" exitCode=0
Jan 27 20:27:43 crc kubenswrapper[4793]: I0127 20:27:43.112513 4793 generic.go:334] "Generic (PLEG): container finished" podID="d5748384-5202-4d5e-9339-8a5cab24359d" containerID="8dc8b4f68261a58f0ab83795e3b834111a882ca5c8361ea0cb2370e508af6ae8" exitCode=0
Jan 27 20:27:43 crc kubenswrapper[4793]: I0127 20:27:43.160527 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d5748384-5202-4d5e-9339-8a5cab24359d","Type":"ContainerDied","Data":"b059262a1921cdefdb4e62cf235283090a344a49000a4e4d219675fe55c8c8f5"}
Jan 27 20:27:43 crc kubenswrapper[4793]: I0127 20:27:43.160584 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d5748384-5202-4d5e-9339-8a5cab24359d","Type":"ContainerDied","Data":"8dc8b4f68261a58f0ab83795e3b834111a882ca5c8361ea0cb2370e508af6ae8"}
Jan 27 20:27:43 crc kubenswrapper[4793]: I0127 20:27:43.181473 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="7fb7b446c25d1ff79e3de54a1a928227db9260a423622219b9cb7ae4758d64dc" exitCode=1
Jan 27 20:27:43 crc kubenswrapper[4793]: I0127 20:27:43.181536 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"7fb7b446c25d1ff79e3de54a1a928227db9260a423622219b9cb7ae4758d64dc"}
Jan 27 20:27:43 crc kubenswrapper[4793]: I0127 20:27:43.181598 4793 scope.go:117] "RemoveContainer" containerID="3135f6d30b9f8d64a130dfb58628947f2f7650a5a0eba624b0c1e61e689b4144"
Jan 27 20:27:43 crc kubenswrapper[4793]: I0127 20:27:43.182427 4793 scope.go:117] "RemoveContainer" containerID="7fb7b446c25d1ff79e3de54a1a928227db9260a423622219b9cb7ae4758d64dc"
Jan 27 20:27:43 crc kubenswrapper[4793]: E0127 20:27:43.182737 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:27:43 crc kubenswrapper[4793]: I0127 20:27:43.269512 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.000380 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-656c4f975d-cgft5" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.176:9311/healthcheck\": read tcp 10.217.0.2:54786->10.217.0.176:9311: read: connection reset by peer"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.000893 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-656c4f975d-cgft5" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.176:9311/healthcheck\": read tcp 10.217.0.2:54788->10.217.0.176:9311: read: connection reset by peer"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.106073 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-5564bdd769-xf2h4"]
Jan 27 20:27:44 crc kubenswrapper[4793]: E0127 20:27:44.108966 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ffb9b65-beb8-4455-92cf-6f275b4f4946" containerName="init"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.109026 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ffb9b65-beb8-4455-92cf-6f275b4f4946" containerName="init"
Jan 27 20:27:44 crc kubenswrapper[4793]: E0127 20:27:44.109047 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" containerName="horizon-log"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.109053 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" containerName="horizon-log"
Jan 27 20:27:44 crc kubenswrapper[4793]: E0127 20:27:44.109064 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" containerName="horizon"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.109070 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" containerName="horizon"
Jan 27 20:27:44 crc kubenswrapper[4793]: E0127 20:27:44.109638 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ffb9b65-beb8-4455-92cf-6f275b4f4946" containerName="dnsmasq-dns"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.109649 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ffb9b65-beb8-4455-92cf-6f275b4f4946" containerName="dnsmasq-dns"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.110213 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ffb9b65-beb8-4455-92cf-6f275b4f4946" containerName="dnsmasq-dns"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.110231 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" containerName="horizon"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.110246 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="39eb7d41-9778-4f8c-a5ab-beb83ee02c3a" containerName="horizon-log"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.115120 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.119026 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.122132 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5564bdd769-xf2h4"]
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.132162 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.132612 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.197992 4793 generic.go:334] "Generic (PLEG): container finished" podID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerID="5dcc822ece0ffdf34ee771e2fec72acdba64ad5fed784c7ea1c67126ba54dc5f" exitCode=0
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.198066 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-656c4f975d-cgft5" event={"ID":"c29ca30f-5826-4ff4-a916-0a20b9ec887e","Type":"ContainerDied","Data":"5dcc822ece0ffdf34ee771e2fec72acdba64ad5fed784c7ea1c67126ba54dc5f"}
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.200740 4793 generic.go:334] "Generic (PLEG): container finished" podID="b604f471-77da-415e-ab02-1ec4a2b6a56c" containerID="1094f406188e6f61e12a913e80ca06ec3e8f8b46f186e41e59f7526802253ff9" exitCode=0
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.200769 4793 generic.go:334] "Generic (PLEG): container finished" podID="b604f471-77da-415e-ab02-1ec4a2b6a56c" containerID="362d97b2baa46d3e5642cc3506b1263d186bbc9221909a73d25cc3561cd80b98" exitCode=0
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.203317 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b604f471-77da-415e-ab02-1ec4a2b6a56c","Type":"ContainerDied","Data":"1094f406188e6f61e12a913e80ca06ec3e8f8b46f186e41e59f7526802253ff9"}
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.203451 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b604f471-77da-415e-ab02-1ec4a2b6a56c","Type":"ContainerDied","Data":"362d97b2baa46d3e5642cc3506b1263d186bbc9221909a73d25cc3561cd80b98"}
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.215315 4793 scope.go:117] "RemoveContainer" containerID="7fb7b446c25d1ff79e3de54a1a928227db9260a423622219b9cb7ae4758d64dc"
Jan 27 20:27:44 crc kubenswrapper[4793]: E0127 20:27:44.215532 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.235201 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d5748384-5202-4d5e-9339-8a5cab24359d","Type":"ContainerDied","Data":"2e49489ae6508cce006d4df8bac509538cf9facd5875224089e7f9f4d246ad37"}
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.235265 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e49489ae6508cce006d4df8bac509538cf9facd5875224089e7f9f4d246ad37"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.263620 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.310470 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-scripts\") pod \"d5748384-5202-4d5e-9339-8a5cab24359d\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") "
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.310617 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d5748384-5202-4d5e-9339-8a5cab24359d-run-httpd\") pod \"d5748384-5202-4d5e-9339-8a5cab24359d\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") "
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.310699 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-sg-core-conf-yaml\") pod \"d5748384-5202-4d5e-9339-8a5cab24359d\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") "
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.310726 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zs2z4\" (UniqueName: \"kubernetes.io/projected/d5748384-5202-4d5e-9339-8a5cab24359d-kube-api-access-zs2z4\") pod \"d5748384-5202-4d5e-9339-8a5cab24359d\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") "
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.310791 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d5748384-5202-4d5e-9339-8a5cab24359d-log-httpd\") pod \"d5748384-5202-4d5e-9339-8a5cab24359d\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") "
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.310840 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-config-data\") pod \"d5748384-5202-4d5e-9339-8a5cab24359d\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") "
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.310890 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-combined-ca-bundle\") pod \"d5748384-5202-4d5e-9339-8a5cab24359d\" (UID: \"d5748384-5202-4d5e-9339-8a5cab24359d\") "
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.311149 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1aeeeb43-14d9-471a-8433-825ee93be32b-log-httpd\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.311203 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aeeeb43-14d9-471a-8433-825ee93be32b-internal-tls-certs\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.311299 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1aeeeb43-14d9-471a-8433-825ee93be32b-etc-swift\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.311341 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aeeeb43-14d9-471a-8433-825ee93be32b-public-tls-certs\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.311399 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aeeeb43-14d9-471a-8433-825ee93be32b-config-data\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.311415 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1aeeeb43-14d9-471a-8433-825ee93be32b-run-httpd\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.311457 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aeeeb43-14d9-471a-8433-825ee93be32b-combined-ca-bundle\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.311517 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24978\" (UniqueName: \"kubernetes.io/projected/1aeeeb43-14d9-471a-8433-825ee93be32b-kube-api-access-24978\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.311930 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5748384-5202-4d5e-9339-8a5cab24359d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d5748384-5202-4d5e-9339-8a5cab24359d" (UID: "d5748384-5202-4d5e-9339-8a5cab24359d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.313509 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5748384-5202-4d5e-9339-8a5cab24359d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d5748384-5202-4d5e-9339-8a5cab24359d" (UID: "d5748384-5202-4d5e-9339-8a5cab24359d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.319597 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5748384-5202-4d5e-9339-8a5cab24359d-kube-api-access-zs2z4" (OuterVolumeSpecName: "kube-api-access-zs2z4") pod "d5748384-5202-4d5e-9339-8a5cab24359d" (UID: "d5748384-5202-4d5e-9339-8a5cab24359d"). InnerVolumeSpecName "kube-api-access-zs2z4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.325873 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-scripts" (OuterVolumeSpecName: "scripts") pod "d5748384-5202-4d5e-9339-8a5cab24359d" (UID: "d5748384-5202-4d5e-9339-8a5cab24359d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.370277 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d5748384-5202-4d5e-9339-8a5cab24359d" (UID: "d5748384-5202-4d5e-9339-8a5cab24359d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.417758 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aeeeb43-14d9-471a-8433-825ee93be32b-internal-tls-certs\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.417848 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1aeeeb43-14d9-471a-8433-825ee93be32b-etc-swift\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.417867 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aeeeb43-14d9-471a-8433-825ee93be32b-public-tls-certs\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.417899 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aeeeb43-14d9-471a-8433-825ee93be32b-config-data\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.417917 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1aeeeb43-14d9-471a-8433-825ee93be32b-run-httpd\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.417942 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aeeeb43-14d9-471a-8433-825ee93be32b-combined-ca-bundle\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.417996 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24978\" (UniqueName: \"kubernetes.io/projected/1aeeeb43-14d9-471a-8433-825ee93be32b-kube-api-access-24978\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.418046 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1aeeeb43-14d9-471a-8433-825ee93be32b-log-httpd\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.418107 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-scripts\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.418119 4793 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d5748384-5202-4d5e-9339-8a5cab24359d-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.418128 4793 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.418137 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zs2z4\" (UniqueName: \"kubernetes.io/projected/d5748384-5202-4d5e-9339-8a5cab24359d-kube-api-access-zs2z4\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.418145 4793 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d5748384-5202-4d5e-9339-8a5cab24359d-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.418775 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1aeeeb43-14d9-471a-8433-825ee93be32b-log-httpd\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.421250 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1aeeeb43-14d9-471a-8433-825ee93be32b-run-httpd\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.431905 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aeeeb43-14d9-471a-8433-825ee93be32b-config-data\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.434213 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aeeeb43-14d9-471a-8433-825ee93be32b-public-tls-certs\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.435624 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1aeeeb43-14d9-471a-8433-825ee93be32b-etc-swift\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.448289 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aeeeb43-14d9-471a-8433-825ee93be32b-combined-ca-bundle\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.458403 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aeeeb43-14d9-471a-8433-825ee93be32b-internal-tls-certs\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.460381 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24978\" (UniqueName: \"kubernetes.io/projected/1aeeeb43-14d9-471a-8433-825ee93be32b-kube-api-access-24978\") pod \"swift-proxy-5564bdd769-xf2h4\" (UID: \"1aeeeb43-14d9-471a-8433-825ee93be32b\") " pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.491927 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d5748384-5202-4d5e-9339-8a5cab24359d" (UID: "d5748384-5202-4d5e-9339-8a5cab24359d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.522170 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.848994 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-5564bdd769-xf2h4"
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.925646 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-config-data" (OuterVolumeSpecName: "config-data") pod "d5748384-5202-4d5e-9339-8a5cab24359d" (UID: "d5748384-5202-4d5e-9339-8a5cab24359d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:27:44 crc kubenswrapper[4793]: I0127 20:27:44.945666 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5748384-5202-4d5e-9339-8a5cab24359d-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.009804 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-7b5cf4cb74-b6v79" podUID="15d23321-0811-4752-b014-9a4f08ceac3f" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.184:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.239231 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="245a06eb-695c-4aca-ae98-93e59ca3ee86" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.181:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.254806 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.447589 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5c88b98b88-ttglp"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.498786 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-656c4f975d-cgft5"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.535627 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.542708 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.574700 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.586632 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 27 20:27:45 crc kubenswrapper[4793]: E0127 20:27:45.587184 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" containerName="ceilometer-notification-agent"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.587211 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" containerName="ceilometer-notification-agent"
Jan 27 20:27:45 crc kubenswrapper[4793]: E0127 20:27:45.587243 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b604f471-77da-415e-ab02-1ec4a2b6a56c" containerName="cinder-scheduler"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.587252 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b604f471-77da-415e-ab02-1ec4a2b6a56c" containerName="cinder-scheduler"
Jan 27 20:27:45 crc kubenswrapper[4793]: E0127 20:27:45.587270 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b604f471-77da-415e-ab02-1ec4a2b6a56c" containerName="probe"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.587282 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b604f471-77da-415e-ab02-1ec4a2b6a56c" containerName="probe"
Jan 27 20:27:45 crc kubenswrapper[4793]: E0127 20:27:45.587308 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" containerName="ceilometer-central-agent"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.587319 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" containerName="ceilometer-central-agent"
Jan 27 20:27:45 crc kubenswrapper[4793]: E0127 20:27:45.587338 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" containerName="sg-core"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.587345 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" containerName="sg-core"
Jan 27 20:27:45 crc kubenswrapper[4793]: E0127 20:27:45.587361 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" containerName="proxy-httpd"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.587368 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" containerName="proxy-httpd"
Jan 27 20:27:45 crc kubenswrapper[4793]: E0127 20:27:45.587385 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api-log"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.587394 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api-log"
Jan 27 20:27:45 crc kubenswrapper[4793]: E0127 20:27:45.587404 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.587411 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.587698 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" containerName="sg-core"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.587728 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="b604f471-77da-415e-ab02-1ec4a2b6a56c" containerName="probe"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.587745 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" containerName="ceilometer-notification-agent"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.587758 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="b604f471-77da-415e-ab02-1ec4a2b6a56c" containerName="cinder-scheduler"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.587772 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" containerName="proxy-httpd"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.587789 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.587800 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" containerName="ceilometer-central-agent"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.587811 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" containerName="barbican-api-log"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.590397 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.601996 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.602149 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.645700 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.674710 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9zlr\" (UniqueName: \"kubernetes.io/projected/c29ca30f-5826-4ff4-a916-0a20b9ec887e-kube-api-access-g9zlr\") pod \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") "
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.674786 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-scripts\") pod \"b604f471-77da-415e-ab02-1ec4a2b6a56c\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") "
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.674888 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-config-data-custom\") pod \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") "
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.674939 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b604f471-77da-415e-ab02-1ec4a2b6a56c-etc-machine-id\") pod \"b604f471-77da-415e-ab02-1ec4a2b6a56c\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") "
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.675038 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-config-data\") pod \"b604f471-77da-415e-ab02-1ec4a2b6a56c\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") "
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.675067 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-config-data-custom\") pod \"b604f471-77da-415e-ab02-1ec4a2b6a56c\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") "
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.675103 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c29ca30f-5826-4ff4-a916-0a20b9ec887e-logs\") pod \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") "
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.675125 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-combined-ca-bundle\") pod \"b604f471-77da-415e-ab02-1ec4a2b6a56c\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") "
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.675156 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-config-data\") pod \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") "
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.675258 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-combined-ca-bundle\") pod \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\" (UID: \"c29ca30f-5826-4ff4-a916-0a20b9ec887e\") "
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.675348 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h59wd\" (UniqueName: \"kubernetes.io/projected/b604f471-77da-415e-ab02-1ec4a2b6a56c-kube-api-access-h59wd\") pod \"b604f471-77da-415e-ab02-1ec4a2b6a56c\" (UID: \"b604f471-77da-415e-ab02-1ec4a2b6a56c\") "
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.675696 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.675734 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcljm\" (UniqueName: \"kubernetes.io/projected/6f293f00-54d9-41da-ab08-efff8919f8b5-kube-api-access-xcljm\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.675833 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.675859 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6f293f00-54d9-41da-ab08-efff8919f8b5-run-httpd\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.675926 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-config-data\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.676051 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-scripts\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.676087 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6f293f00-54d9-41da-ab08-efff8919f8b5-log-httpd\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.679839 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c29ca30f-5826-4ff4-a916-0a20b9ec887e-logs" (OuterVolumeSpecName: "logs") pod "c29ca30f-5826-4ff4-a916-0a20b9ec887e" (UID: "c29ca30f-5826-4ff4-a916-0a20b9ec887e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.683804 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c29ca30f-5826-4ff4-a916-0a20b9ec887e-kube-api-access-g9zlr" (OuterVolumeSpecName: "kube-api-access-g9zlr") pod "c29ca30f-5826-4ff4-a916-0a20b9ec887e" (UID: "c29ca30f-5826-4ff4-a916-0a20b9ec887e"). InnerVolumeSpecName "kube-api-access-g9zlr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.684336 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b604f471-77da-415e-ab02-1ec4a2b6a56c-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b604f471-77da-415e-ab02-1ec4a2b6a56c" (UID: "b604f471-77da-415e-ab02-1ec4a2b6a56c"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.689025 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b604f471-77da-415e-ab02-1ec4a2b6a56c" (UID: "b604f471-77da-415e-ab02-1ec4a2b6a56c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.707452 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c29ca30f-5826-4ff4-a916-0a20b9ec887e" (UID: "c29ca30f-5826-4ff4-a916-0a20b9ec887e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.708760 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b604f471-77da-415e-ab02-1ec4a2b6a56c-kube-api-access-h59wd" (OuterVolumeSpecName: "kube-api-access-h59wd") pod "b604f471-77da-415e-ab02-1ec4a2b6a56c" (UID: "b604f471-77da-415e-ab02-1ec4a2b6a56c"). InnerVolumeSpecName "kube-api-access-h59wd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.708992 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-scripts" (OuterVolumeSpecName: "scripts") pod "b604f471-77da-415e-ab02-1ec4a2b6a56c" (UID: "b604f471-77da-415e-ab02-1ec4a2b6a56c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.748499 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c29ca30f-5826-4ff4-a916-0a20b9ec887e" (UID: "c29ca30f-5826-4ff4-a916-0a20b9ec887e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.754402 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5564bdd769-xf2h4"]
Jan 27 20:27:45 crc kubenswrapper[4793]: W0127 20:27:45.754530 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1aeeeb43_14d9_471a_8433_825ee93be32b.slice/crio-4381d7f980ffaa30144fc849b81ae2193274ccc3ef675abf229681644d929f23 WatchSource:0}: Error finding container 4381d7f980ffaa30144fc849b81ae2193274ccc3ef675abf229681644d929f23: Status 404 returned error can't find the container with id 4381d7f980ffaa30144fc849b81ae2193274ccc3ef675abf229681644d929f23
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.790860 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.790915 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6f293f00-54d9-41da-ab08-efff8919f8b5-run-httpd\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.791026 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-config-data\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.791654 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6f293f00-54d9-41da-ab08-efff8919f8b5-run-httpd\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.791731 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-scripts\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.791764 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6f293f00-54d9-41da-ab08-efff8919f8b5-log-httpd\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.791841 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcljm\" (UniqueName: \"kubernetes.io/projected/6f293f00-54d9-41da-ab08-efff8919f8b5-kube-api-access-xcljm\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.791872 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.793048 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.793069 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h59wd\" (UniqueName: \"kubernetes.io/projected/b604f471-77da-415e-ab02-1ec4a2b6a56c-kube-api-access-h59wd\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.793079 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9zlr\" (UniqueName: \"kubernetes.io/projected/c29ca30f-5826-4ff4-a916-0a20b9ec887e-kube-api-access-g9zlr\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.793088 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-scripts\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.793097 4793 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.793105 4793 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b604f471-77da-415e-ab02-1ec4a2b6a56c-etc-machine-id\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.793113 4793 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.793122 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c29ca30f-5826-4ff4-a916-0a20b9ec887e-logs\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.794117 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6f293f00-54d9-41da-ab08-efff8919f8b5-log-httpd\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.805124 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-config-data\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.805124 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.813956 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-config-data" (OuterVolumeSpecName: "config-data") pod "c29ca30f-5826-4ff4-a916-0a20b9ec887e" (UID: "c29ca30f-5826-4ff4-a916-0a20b9ec887e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.814716 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.818369 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-scripts\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.819111 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcljm\" (UniqueName: \"kubernetes.io/projected/6f293f00-54d9-41da-ab08-efff8919f8b5-kube-api-access-xcljm\") pod \"ceilometer-0\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") " pod="openstack/ceilometer-0"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.831717 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b604f471-77da-415e-ab02-1ec4a2b6a56c" (UID: "b604f471-77da-415e-ab02-1ec4a2b6a56c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.851992 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5748384-5202-4d5e-9339-8a5cab24359d" path="/var/lib/kubelet/pods/d5748384-5202-4d5e-9339-8a5cab24359d/volumes"
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.875999 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-config-data" (OuterVolumeSpecName: "config-data") pod "b604f471-77da-415e-ab02-1ec4a2b6a56c" (UID: "b604f471-77da-415e-ab02-1ec4a2b6a56c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.894380 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.894413 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b604f471-77da-415e-ab02-1ec4a2b6a56c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.894426 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c29ca30f-5826-4ff4-a916-0a20b9ec887e-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 20:27:45 crc kubenswrapper[4793]: I0127 20:27:45.946705 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 27 20:27:46 crc kubenswrapper[4793]: E0127 20:27:46.095082 4793 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc29ca30f_5826_4ff4_a916_0a20b9ec887e.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc29ca30f_5826_4ff4_a916_0a20b9ec887e.slice/crio-7b6f2dada7cddcd5041d18d829fdecc54c55ce6ef2a047ad7120eaf7a1484eaa\": RecentStats: unable to find data in memory cache]"
Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.283534 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5564bdd769-xf2h4" event={"ID":"1aeeeb43-14d9-471a-8433-825ee93be32b","Type":"ContainerStarted","Data":"02d7849d4d3fa5c30e4f538192aee0d7a48029a51132da9f5c59a6821f52e506"}
Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.283610 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5564bdd769-xf2h4" event={"ID":"1aeeeb43-14d9-471a-8433-825ee93be32b","Type":"ContainerStarted","Data":"4381d7f980ffaa30144fc849b81ae2193274ccc3ef675abf229681644d929f23"}
Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.290099 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-656c4f975d-cgft5" event={"ID":"c29ca30f-5826-4ff4-a916-0a20b9ec887e","Type":"ContainerDied","Data":"7b6f2dada7cddcd5041d18d829fdecc54c55ce6ef2a047ad7120eaf7a1484eaa"}
Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.290439 4793 scope.go:117] "RemoveContainer" containerID="5dcc822ece0ffdf34ee771e2fec72acdba64ad5fed784c7ea1c67126ba54dc5f"
Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.290451 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-656c4f975d-cgft5" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.298098 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b604f471-77da-415e-ab02-1ec4a2b6a56c","Type":"ContainerDied","Data":"c84b25fad0758bf75c04f384414eb518fe74fce0de3e8d1d66585b141b204758"} Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.298228 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.355631 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.362667 4793 scope.go:117] "RemoveContainer" containerID="28d31860dcd7d9feb0eba4bb17d710a25b8d8dba4d4dadf99ea961f22ff176b1" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.365940 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.386252 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-656c4f975d-cgft5"] Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.395270 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-656c4f975d-cgft5"] Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.415438 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.415543 4793 scope.go:117] "RemoveContainer" containerID="1094f406188e6f61e12a913e80ca06ec3e8f8b46f186e41e59f7526802253ff9" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.417121 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.420115 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.426366 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.457884 4793 scope.go:117] "RemoveContainer" containerID="362d97b2baa46d3e5642cc3506b1263d186bbc9221909a73d25cc3561cd80b98" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.495206 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.514765 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-scripts\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.514807 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.514896 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsl88\" (UniqueName: \"kubernetes.io/projected/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-kube-api-access-tsl88\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.514993 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.515221 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.515278 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-config-data\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.616979 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.617036 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-config-data\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.617073 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-scripts\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.617095 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.617168 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsl88\" (UniqueName: \"kubernetes.io/projected/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-kube-api-access-tsl88\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.617198 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.617317 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.622459 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.622573 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.628441 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-scripts\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.628956 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-config-data\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.641228 
4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsl88\" (UniqueName: \"kubernetes.io/projected/8a6fa3f9-7dd5-47d9-8650-eb700dc18497-kube-api-access-tsl88\") pod \"cinder-scheduler-0\" (UID: \"8a6fa3f9-7dd5-47d9-8650-eb700dc18497\") " pod="openstack/cinder-scheduler-0" Jan 27 20:27:46 crc kubenswrapper[4793]: I0127 20:27:46.733070 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 27 20:27:47 crc kubenswrapper[4793]: I0127 20:27:47.309131 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6f293f00-54d9-41da-ab08-efff8919f8b5","Type":"ContainerStarted","Data":"ba5306e162362475d7f5ebf5b86d1a615020c15ddb389e069e62e684c08ca716"} Jan 27 20:27:47 crc kubenswrapper[4793]: I0127 20:27:47.311763 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5564bdd769-xf2h4" event={"ID":"1aeeeb43-14d9-471a-8433-825ee93be32b","Type":"ContainerStarted","Data":"d5874d6b6613170198ee5a2bb31079dc5b77cdd8eb0c2b0915a0534511b16d53"} Jan 27 20:27:47 crc kubenswrapper[4793]: I0127 20:27:47.313067 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5564bdd769-xf2h4" Jan 27 20:27:47 crc kubenswrapper[4793]: I0127 20:27:47.313114 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5564bdd769-xf2h4" Jan 27 20:27:47 crc kubenswrapper[4793]: I0127 20:27:47.882931 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b604f471-77da-415e-ab02-1ec4a2b6a56c" path="/var/lib/kubelet/pods/b604f471-77da-415e-ab02-1ec4a2b6a56c/volumes" Jan 27 20:27:47 crc kubenswrapper[4793]: I0127 20:27:47.883993 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c29ca30f-5826-4ff4-a916-0a20b9ec887e" path="/var/lib/kubelet/pods/c29ca30f-5826-4ff4-a916-0a20b9ec887e/volumes" Jan 27 20:27:48 crc kubenswrapper[4793]: I0127 20:27:48.041668 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 27 20:27:48 crc kubenswrapper[4793]: I0127 20:27:48.056370 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 27 20:27:48 crc kubenswrapper[4793]: I0127 20:27:48.057168 4793 scope.go:117] "RemoveContainer" containerID="8fc61af3848de745688268804290928f223177c35ea49f4f7841a974d950a45b" Jan 27 20:27:48 crc kubenswrapper[4793]: E0127 20:27:48.057489 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 40s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(ffc46614-5f6d-40ad-a388-1ff326d22ee6)\"" pod="openstack/watcher-decision-engine-0" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" Jan 27 20:27:48 crc kubenswrapper[4793]: I0127 20:27:48.061323 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-5564bdd769-xf2h4" podStartSLOduration=5.061304692 podStartE2EDuration="5.061304692s" podCreationTimestamp="2026-01-27 20:27:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:27:47.344860194 +0000 UTC m=+1492.735113350" watchObservedRunningTime="2026-01-27 20:27:48.061304692 +0000 UTC m=+1493.451557848" Jan 27 20:27:48 crc kubenswrapper[4793]: I0127 
20:27:48.243247 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:27:48 crc kubenswrapper[4793]: I0127 20:27:48.244061 4793 scope.go:117] "RemoveContainer" containerID="7fb7b446c25d1ff79e3de54a1a928227db9260a423622219b9cb7ae4758d64dc" Jan 27 20:27:48 crc kubenswrapper[4793]: E0127 20:27:48.244288 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:27:48 crc kubenswrapper[4793]: I0127 20:27:48.244640 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:27:48 crc kubenswrapper[4793]: I0127 20:27:48.333885 4793 scope.go:117] "RemoveContainer" containerID="7fb7b446c25d1ff79e3de54a1a928227db9260a423622219b9cb7ae4758d64dc" Jan 27 20:27:48 crc kubenswrapper[4793]: E0127 20:27:48.335210 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:27:49 crc kubenswrapper[4793]: I0127 20:27:49.881256 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5875f8f849-hlxzn" Jan 27 20:27:49 crc kubenswrapper[4793]: I0127 20:27:49.973621 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5c88b98b88-ttglp"] Jan 27 20:27:49 crc kubenswrapper[4793]: I0127 20:27:49.974255 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5c88b98b88-ttglp" podUID="630adb3c-5213-4815-81d3-9cfd7948e790" containerName="neutron-httpd" containerID="cri-o://2970644cb6d048b4d227cb4622856bea76921737325f04924bec542372eb3d62" gracePeriod=30 Jan 27 20:27:49 crc kubenswrapper[4793]: I0127 20:27:49.974506 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5c88b98b88-ttglp" podUID="630adb3c-5213-4815-81d3-9cfd7948e790" containerName="neutron-api" containerID="cri-o://a01259b78e0c432076efad3fedb676023d94b82a4619bb4b946f66ddb34ca89d" gracePeriod=30 Jan 27 20:27:50 crc kubenswrapper[4793]: I0127 20:27:50.962230 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 20:27:50 crc kubenswrapper[4793]: I0127 20:27:50.962952 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" containerName="glance-log" containerID="cri-o://ecf5044d9db26de30ffd161f4c9633a199156e9809cfb6028601faf12a564bde" gracePeriod=30 Jan 27 20:27:50 crc kubenswrapper[4793]: I0127 20:27:50.963090 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" containerName="glance-httpd" containerID="cri-o://5fc2b23273ae646efdecc9b22a3cacc0c451c4b9af4bce6696c35d0d0a471e97" gracePeriod=30 Jan 27 20:27:51 crc kubenswrapper[4793]: I0127 20:27:51.371955 4793 generic.go:334] "Generic (PLEG): container finished" 
podID="8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" containerID="ecf5044d9db26de30ffd161f4c9633a199156e9809cfb6028601faf12a564bde" exitCode=143 Jan 27 20:27:51 crc kubenswrapper[4793]: I0127 20:27:51.372048 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e","Type":"ContainerDied","Data":"ecf5044d9db26de30ffd161f4c9633a199156e9809cfb6028601faf12a564bde"} Jan 27 20:27:51 crc kubenswrapper[4793]: I0127 20:27:51.376454 4793 generic.go:334] "Generic (PLEG): container finished" podID="630adb3c-5213-4815-81d3-9cfd7948e790" containerID="2970644cb6d048b4d227cb4622856bea76921737325f04924bec542372eb3d62" exitCode=0 Jan 27 20:27:51 crc kubenswrapper[4793]: I0127 20:27:51.376527 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c88b98b88-ttglp" event={"ID":"630adb3c-5213-4815-81d3-9cfd7948e790","Type":"ContainerDied","Data":"2970644cb6d048b4d227cb4622856bea76921737325f04924bec542372eb3d62"} Jan 27 20:27:52 crc kubenswrapper[4793]: I0127 20:27:52.922017 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.412247 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e","Type":"ContainerDied","Data":"5fc2b23273ae646efdecc9b22a3cacc0c451c4b9af4bce6696c35d0d0a471e97"} Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.412189 4793 generic.go:334] "Generic (PLEG): container finished" podID="8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" containerID="5fc2b23273ae646efdecc9b22a3cacc0c451c4b9af4bce6696c35d0d0a471e97" exitCode=0 Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.504375 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-nmlw2"] Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.506825 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-nmlw2" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.514165 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pk9wd\" (UniqueName: \"kubernetes.io/projected/b87e83c5-3906-4d5e-ae19-58ed6148d219-kube-api-access-pk9wd\") pod \"nova-api-db-create-nmlw2\" (UID: \"b87e83c5-3906-4d5e-ae19-58ed6148d219\") " pod="openstack/nova-api-db-create-nmlw2" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.514251 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b87e83c5-3906-4d5e-ae19-58ed6148d219-operator-scripts\") pod \"nova-api-db-create-nmlw2\" (UID: \"b87e83c5-3906-4d5e-ae19-58ed6148d219\") " pod="openstack/nova-api-db-create-nmlw2" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.530085 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-nmlw2"] Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.618764 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pk9wd\" (UniqueName: \"kubernetes.io/projected/b87e83c5-3906-4d5e-ae19-58ed6148d219-kube-api-access-pk9wd\") pod \"nova-api-db-create-nmlw2\" (UID: \"b87e83c5-3906-4d5e-ae19-58ed6148d219\") " pod="openstack/nova-api-db-create-nmlw2" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.619068 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b87e83c5-3906-4d5e-ae19-58ed6148d219-operator-scripts\") pod \"nova-api-db-create-nmlw2\" (UID: \"b87e83c5-3906-4d5e-ae19-58ed6148d219\") " pod="openstack/nova-api-db-create-nmlw2" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.619918 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b87e83c5-3906-4d5e-ae19-58ed6148d219-operator-scripts\") pod \"nova-api-db-create-nmlw2\" (UID: \"b87e83c5-3906-4d5e-ae19-58ed6148d219\") " pod="openstack/nova-api-db-create-nmlw2" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.623361 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-wfkh5"] Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.624647 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-wfkh5" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.640873 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-wfkh5"] Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.658239 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pk9wd\" (UniqueName: \"kubernetes.io/projected/b87e83c5-3906-4d5e-ae19-58ed6148d219-kube-api-access-pk9wd\") pod \"nova-api-db-create-nmlw2\" (UID: \"b87e83c5-3906-4d5e-ae19-58ed6148d219\") " pod="openstack/nova-api-db-create-nmlw2" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.720227 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pz7gj\" (UniqueName: \"kubernetes.io/projected/c3a94a7d-379f-4d4d-8728-3b1509189b93-kube-api-access-pz7gj\") pod \"nova-cell0-db-create-wfkh5\" (UID: \"c3a94a7d-379f-4d4d-8728-3b1509189b93\") " pod="openstack/nova-cell0-db-create-wfkh5" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.720578 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3a94a7d-379f-4d4d-8728-3b1509189b93-operator-scripts\") pod \"nova-cell0-db-create-wfkh5\" (UID: \"c3a94a7d-379f-4d4d-8728-3b1509189b93\") " pod="openstack/nova-cell0-db-create-wfkh5" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.729763 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-7b87-account-create-update-qrtvs"] Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.731178 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7b87-account-create-update-qrtvs" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.733404 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.739982 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7b87-account-create-update-qrtvs"] Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.824787 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3a94a7d-379f-4d4d-8728-3b1509189b93-operator-scripts\") pod \"nova-cell0-db-create-wfkh5\" (UID: \"c3a94a7d-379f-4d4d-8728-3b1509189b93\") " pod="openstack/nova-cell0-db-create-wfkh5" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.825144 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/607b194b-5aaa-4b00-92f1-3448913e04f5-operator-scripts\") pod \"nova-api-7b87-account-create-update-qrtvs\" (UID: \"607b194b-5aaa-4b00-92f1-3448913e04f5\") " pod="openstack/nova-api-7b87-account-create-update-qrtvs" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.825198 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bv8n\" (UniqueName: \"kubernetes.io/projected/607b194b-5aaa-4b00-92f1-3448913e04f5-kube-api-access-4bv8n\") pod \"nova-api-7b87-account-create-update-qrtvs\" (UID: \"607b194b-5aaa-4b00-92f1-3448913e04f5\") " pod="openstack/nova-api-7b87-account-create-update-qrtvs" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.825234 4793 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-pz7gj\" (UniqueName: \"kubernetes.io/projected/c3a94a7d-379f-4d4d-8728-3b1509189b93-kube-api-access-pz7gj\") pod \"nova-cell0-db-create-wfkh5\" (UID: \"c3a94a7d-379f-4d4d-8728-3b1509189b93\") " pod="openstack/nova-cell0-db-create-wfkh5" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.826344 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3a94a7d-379f-4d4d-8728-3b1509189b93-operator-scripts\") pod \"nova-cell0-db-create-wfkh5\" (UID: \"c3a94a7d-379f-4d4d-8728-3b1509189b93\") " pod="openstack/nova-cell0-db-create-wfkh5" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.829803 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-7g4f4"] Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.830945 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-nmlw2" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.831340 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-7g4f4" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.840041 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-7g4f4"] Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.862530 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pz7gj\" (UniqueName: \"kubernetes.io/projected/c3a94a7d-379f-4d4d-8728-3b1509189b93-kube-api-access-pz7gj\") pod \"nova-cell0-db-create-wfkh5\" (UID: \"c3a94a7d-379f-4d4d-8728-3b1509189b93\") " pod="openstack/nova-cell0-db-create-wfkh5" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.930229 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/607b194b-5aaa-4b00-92f1-3448913e04f5-operator-scripts\") pod \"nova-api-7b87-account-create-update-qrtvs\" (UID: \"607b194b-5aaa-4b00-92f1-3448913e04f5\") " pod="openstack/nova-api-7b87-account-create-update-qrtvs" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.930319 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bv8n\" (UniqueName: \"kubernetes.io/projected/607b194b-5aaa-4b00-92f1-3448913e04f5-kube-api-access-4bv8n\") pod \"nova-api-7b87-account-create-update-qrtvs\" (UID: \"607b194b-5aaa-4b00-92f1-3448913e04f5\") " pod="openstack/nova-api-7b87-account-create-update-qrtvs" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.930494 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdaa4741-780c-464b-80cc-64eeaf8607de-operator-scripts\") pod \"nova-cell1-db-create-7g4f4\" (UID: \"cdaa4741-780c-464b-80cc-64eeaf8607de\") " pod="openstack/nova-cell1-db-create-7g4f4" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.930523 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88v25\" (UniqueName: \"kubernetes.io/projected/cdaa4741-780c-464b-80cc-64eeaf8607de-kube-api-access-88v25\") pod \"nova-cell1-db-create-7g4f4\" (UID: \"cdaa4741-780c-464b-80cc-64eeaf8607de\") " pod="openstack/nova-cell1-db-create-7g4f4" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.931420 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/607b194b-5aaa-4b00-92f1-3448913e04f5-operator-scripts\") pod \"nova-api-7b87-account-create-update-qrtvs\" (UID: \"607b194b-5aaa-4b00-92f1-3448913e04f5\") " pod="openstack/nova-api-7b87-account-create-update-qrtvs" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.959154 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-b226-account-create-update-8gr8q"] Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.960752 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-b226-account-create-update-8gr8q" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.963985 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.964404 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bv8n\" (UniqueName: \"kubernetes.io/projected/607b194b-5aaa-4b00-92f1-3448913e04f5-kube-api-access-4bv8n\") pod \"nova-api-7b87-account-create-update-qrtvs\" (UID: \"607b194b-5aaa-4b00-92f1-3448913e04f5\") " pod="openstack/nova-api-7b87-account-create-update-qrtvs" Jan 27 20:27:53 crc kubenswrapper[4793]: I0127 20:27:53.973867 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-b226-account-create-update-8gr8q"] Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.042454 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrr5q\" (UniqueName: \"kubernetes.io/projected/8f214dfe-47ca-4d9c-804b-1672e923954f-kube-api-access-wrr5q\") pod \"nova-cell0-b226-account-create-update-8gr8q\" (UID: \"8f214dfe-47ca-4d9c-804b-1672e923954f\") " pod="openstack/nova-cell0-b226-account-create-update-8gr8q" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.042620 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdaa4741-780c-464b-80cc-64eeaf8607de-operator-scripts\") pod \"nova-cell1-db-create-7g4f4\" (UID: \"cdaa4741-780c-464b-80cc-64eeaf8607de\") " pod="openstack/nova-cell1-db-create-7g4f4" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.042646 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88v25\" (UniqueName: \"kubernetes.io/projected/cdaa4741-780c-464b-80cc-64eeaf8607de-kube-api-access-88v25\") pod \"nova-cell1-db-create-7g4f4\" (UID: \"cdaa4741-780c-464b-80cc-64eeaf8607de\") " pod="openstack/nova-cell1-db-create-7g4f4" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.042848 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f214dfe-47ca-4d9c-804b-1672e923954f-operator-scripts\") pod \"nova-cell0-b226-account-create-update-8gr8q\" (UID: \"8f214dfe-47ca-4d9c-804b-1672e923954f\") " pod="openstack/nova-cell0-b226-account-create-update-8gr8q" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.043291 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdaa4741-780c-464b-80cc-64eeaf8607de-operator-scripts\") pod \"nova-cell1-db-create-7g4f4\" (UID: \"cdaa4741-780c-464b-80cc-64eeaf8607de\") " pod="openstack/nova-cell1-db-create-7g4f4" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.064723 4793 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88v25\" (UniqueName: \"kubernetes.io/projected/cdaa4741-780c-464b-80cc-64eeaf8607de-kube-api-access-88v25\") pod \"nova-cell1-db-create-7g4f4\" (UID: \"cdaa4741-780c-464b-80cc-64eeaf8607de\") " pod="openstack/nova-cell1-db-create-7g4f4" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.067614 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-wfkh5" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.085172 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7b87-account-create-update-qrtvs" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.144863 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f214dfe-47ca-4d9c-804b-1672e923954f-operator-scripts\") pod \"nova-cell0-b226-account-create-update-8gr8q\" (UID: \"8f214dfe-47ca-4d9c-804b-1672e923954f\") " pod="openstack/nova-cell0-b226-account-create-update-8gr8q" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.145260 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrr5q\" (UniqueName: \"kubernetes.io/projected/8f214dfe-47ca-4d9c-804b-1672e923954f-kube-api-access-wrr5q\") pod \"nova-cell0-b226-account-create-update-8gr8q\" (UID: \"8f214dfe-47ca-4d9c-804b-1672e923954f\") " pod="openstack/nova-cell0-b226-account-create-update-8gr8q" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.146653 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f214dfe-47ca-4d9c-804b-1672e923954f-operator-scripts\") pod \"nova-cell0-b226-account-create-update-8gr8q\" (UID: \"8f214dfe-47ca-4d9c-804b-1672e923954f\") " pod="openstack/nova-cell0-b226-account-create-update-8gr8q" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.145813 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-9878-account-create-update-t2hhx"] Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.151444 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-9878-account-create-update-t2hhx" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.153286 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.174847 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrr5q\" (UniqueName: \"kubernetes.io/projected/8f214dfe-47ca-4d9c-804b-1672e923954f-kube-api-access-wrr5q\") pod \"nova-cell0-b226-account-create-update-8gr8q\" (UID: \"8f214dfe-47ca-4d9c-804b-1672e923954f\") " pod="openstack/nova-cell0-b226-account-create-update-8gr8q" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.183923 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-9878-account-create-update-t2hhx"] Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.270633 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/58fcdaca-8f5a-4bac-8b2f-754e27d164c0-operator-scripts\") pod \"nova-cell1-9878-account-create-update-t2hhx\" (UID: \"58fcdaca-8f5a-4bac-8b2f-754e27d164c0\") " pod="openstack/nova-cell1-9878-account-create-update-t2hhx" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.272056 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p56mz\" (UniqueName: \"kubernetes.io/projected/58fcdaca-8f5a-4bac-8b2f-754e27d164c0-kube-api-access-p56mz\") pod \"nova-cell1-9878-account-create-update-t2hhx\" (UID: \"58fcdaca-8f5a-4bac-8b2f-754e27d164c0\") " pod="openstack/nova-cell1-9878-account-create-update-t2hhx" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.287678 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-7g4f4" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.301930 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-b226-account-create-update-8gr8q" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.366887 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.377835 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/58fcdaca-8f5a-4bac-8b2f-754e27d164c0-operator-scripts\") pod \"nova-cell1-9878-account-create-update-t2hhx\" (UID: \"58fcdaca-8f5a-4bac-8b2f-754e27d164c0\") " pod="openstack/nova-cell1-9878-account-create-update-t2hhx" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.377918 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p56mz\" (UniqueName: \"kubernetes.io/projected/58fcdaca-8f5a-4bac-8b2f-754e27d164c0-kube-api-access-p56mz\") pod \"nova-cell1-9878-account-create-update-t2hhx\" (UID: \"58fcdaca-8f5a-4bac-8b2f-754e27d164c0\") " pod="openstack/nova-cell1-9878-account-create-update-t2hhx" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.379220 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/58fcdaca-8f5a-4bac-8b2f-754e27d164c0-operator-scripts\") pod \"nova-cell1-9878-account-create-update-t2hhx\" (UID: \"58fcdaca-8f5a-4bac-8b2f-754e27d164c0\") " pod="openstack/nova-cell1-9878-account-create-update-t2hhx" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.401110 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p56mz\" (UniqueName: \"kubernetes.io/projected/58fcdaca-8f5a-4bac-8b2f-754e27d164c0-kube-api-access-p56mz\") pod \"nova-cell1-9878-account-create-update-t2hhx\" (UID: \"58fcdaca-8f5a-4bac-8b2f-754e27d164c0\") " pod="openstack/nova-cell1-9878-account-create-update-t2hhx" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.472275 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e","Type":"ContainerDied","Data":"7e1375b26b6e9a6a21f8174140f7c107d7630b0be02dce46950ecce13a9a60d0"} Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.472359 4793 scope.go:117] "RemoveContainer" containerID="5fc2b23273ae646efdecc9b22a3cacc0c451c4b9af4bce6696c35d0d0a471e97" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.472624 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.480702 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-config-data\") pod \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.480788 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-public-tls-certs\") pod \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.480857 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-logs\") pod \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.480944 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-scripts\") pod \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.481066 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-292t5\" (UniqueName: \"kubernetes.io/projected/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-kube-api-access-292t5\") pod \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.481129 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-combined-ca-bundle\") pod \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.481171 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-httpd-run\") pod \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.481272 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\" (UID: \"8ca404d2-1fe1-4c87-b5b5-aacc11a0525e\") " Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.488118 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" (UID: "8ca404d2-1fe1-4c87-b5b5-aacc11a0525e"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.489488 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-logs" (OuterVolumeSpecName: "logs") pod "8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" (UID: "8ca404d2-1fe1-4c87-b5b5-aacc11a0525e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.493008 4793 generic.go:334] "Generic (PLEG): container finished" podID="630adb3c-5213-4815-81d3-9cfd7948e790" containerID="a01259b78e0c432076efad3fedb676023d94b82a4619bb4b946f66ddb34ca89d" exitCode=0 Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.493058 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c88b98b88-ttglp" event={"ID":"630adb3c-5213-4815-81d3-9cfd7948e790","Type":"ContainerDied","Data":"a01259b78e0c432076efad3fedb676023d94b82a4619bb4b946f66ddb34ca89d"} Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.507185 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-kube-api-access-292t5" (OuterVolumeSpecName: "kube-api-access-292t5") pod "8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" (UID: "8ca404d2-1fe1-4c87-b5b5-aacc11a0525e"). InnerVolumeSpecName "kube-api-access-292t5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.508000 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-scripts" (OuterVolumeSpecName: "scripts") pod "8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" (UID: "8ca404d2-1fe1-4c87-b5b5-aacc11a0525e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.510905 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" (UID: "8ca404d2-1fe1-4c87-b5b5-aacc11a0525e"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.526164 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-9878-account-create-update-t2hhx" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.561868 4793 scope.go:117] "RemoveContainer" containerID="ecf5044d9db26de30ffd161f4c9633a199156e9809cfb6028601faf12a564bde" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.587365 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-292t5\" (UniqueName: \"kubernetes.io/projected/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-kube-api-access-292t5\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.587397 4793 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.587444 4793 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.587457 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-logs\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.587469 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:54 crc kubenswrapper[4793]: I0127 20:27:54.631083 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.003387 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5564bdd769-xf2h4" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.005722 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-config-data" (OuterVolumeSpecName: "config-data") pod "8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" (UID: "8ca404d2-1fe1-4c87-b5b5-aacc11a0525e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.043959 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.055289 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5564bdd769-xf2h4" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.087903 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" (UID: "8ca404d2-1fe1-4c87-b5b5-aacc11a0525e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.089280 4793 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.146236 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.146505 4793 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.160701 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" (UID: "8ca404d2-1fe1-4c87-b5b5-aacc11a0525e"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.172300 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.207273 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-nmlw2"] Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.222127 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-wfkh5"] Jan 27 20:27:55 crc kubenswrapper[4793]: W0127 20:27:55.240060 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc3a94a7d_379f_4d4d_8728_3b1509189b93.slice/crio-4ed26b4ff8265ec0e875f8795f9be2c4084a5644f4f09f3bcabd0640ad6954a3 WatchSource:0}: Error finding container 4ed26b4ff8265ec0e875f8795f9be2c4084a5644f4f09f3bcabd0640ad6954a3: Status 404 returned error can't find the container with id 4ed26b4ff8265ec0e875f8795f9be2c4084a5644f4f09f3bcabd0640ad6954a3 Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.247519 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5447\" (UniqueName: \"kubernetes.io/projected/630adb3c-5213-4815-81d3-9cfd7948e790-kube-api-access-v5447\") pod \"630adb3c-5213-4815-81d3-9cfd7948e790\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.247753 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-config\") pod \"630adb3c-5213-4815-81d3-9cfd7948e790\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.248059 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-combined-ca-bundle\") pod \"630adb3c-5213-4815-81d3-9cfd7948e790\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.248298 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-ovndb-tls-certs\") pod \"630adb3c-5213-4815-81d3-9cfd7948e790\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.249717 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-httpd-config\") pod \"630adb3c-5213-4815-81d3-9cfd7948e790\" (UID: \"630adb3c-5213-4815-81d3-9cfd7948e790\") " Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.250993 4793 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.257028 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "630adb3c-5213-4815-81d3-9cfd7948e790" (UID: "630adb3c-5213-4815-81d3-9cfd7948e790"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.272702 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/630adb3c-5213-4815-81d3-9cfd7948e790-kube-api-access-v5447" (OuterVolumeSpecName: "kube-api-access-v5447") pod "630adb3c-5213-4815-81d3-9cfd7948e790" (UID: "630adb3c-5213-4815-81d3-9cfd7948e790"). InnerVolumeSpecName "kube-api-access-v5447". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.358693 4793 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.358746 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5447\" (UniqueName: \"kubernetes.io/projected/630adb3c-5213-4815-81d3-9cfd7948e790-kube-api-access-v5447\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.438889 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.452248 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.462473 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 20:27:55 crc kubenswrapper[4793]: E0127 20:27:55.463639 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="630adb3c-5213-4815-81d3-9cfd7948e790" containerName="neutron-api" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.463666 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="630adb3c-5213-4815-81d3-9cfd7948e790" containerName="neutron-api" Jan 27 20:27:55 crc kubenswrapper[4793]: E0127 20:27:55.463688 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" containerName="glance-httpd" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.463695 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" containerName="glance-httpd" Jan 27 20:27:55 crc kubenswrapper[4793]: E0127 
20:27:55.463711 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" containerName="glance-log" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.463717 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" containerName="glance-log" Jan 27 20:27:55 crc kubenswrapper[4793]: E0127 20:27:55.463733 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="630adb3c-5213-4815-81d3-9cfd7948e790" containerName="neutron-httpd" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.463739 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="630adb3c-5213-4815-81d3-9cfd7948e790" containerName="neutron-httpd" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.463918 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="630adb3c-5213-4815-81d3-9cfd7948e790" containerName="neutron-api" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.463957 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="630adb3c-5213-4815-81d3-9cfd7948e790" containerName="neutron-httpd" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.463965 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" containerName="glance-log" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.463981 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" containerName="glance-httpd" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.465196 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.470769 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.471007 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.477075 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.543487 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-config" (OuterVolumeSpecName: "config") pod "630adb3c-5213-4815-81d3-9cfd7948e790" (UID: "630adb3c-5213-4815-81d3-9cfd7948e790"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.552228 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-nmlw2" event={"ID":"b87e83c5-3906-4d5e-ae19-58ed6148d219","Type":"ContainerStarted","Data":"4b31e473650d137bb8d55417148cb46c3d9a065abe70f97602dca401334f72b6"} Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.698471 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-logs\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.698535 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-scripts\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.698605 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.698635 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.698736 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.698766 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-config-data\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.698885 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.698918 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzpk6\" (UniqueName: \"kubernetes.io/projected/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-kube-api-access-xzpk6\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " 
pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.699007 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.699534 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c88b98b88-ttglp" event={"ID":"630adb3c-5213-4815-81d3-9cfd7948e790","Type":"ContainerDied","Data":"56c4cb6f6198ece6e54fb7ff743d6cad8ff47169e1f6cee07ef5568ce1dbf3e9"} Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.699597 4793 scope.go:117] "RemoveContainer" containerID="2970644cb6d048b4d227cb4622856bea76921737325f04924bec542372eb3d62" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.699714 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5c88b98b88-ttglp" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.718268 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-7g4f4"] Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.732216 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-wfkh5" event={"ID":"c3a94a7d-379f-4d4d-8728-3b1509189b93","Type":"ContainerStarted","Data":"4ed26b4ff8265ec0e875f8795f9be2c4084a5644f4f09f3bcabd0640ad6954a3"} Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.750745 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6f293f00-54d9-41da-ab08-efff8919f8b5","Type":"ContainerStarted","Data":"1067b6df4301bd0f163ae99848469b3baca5992595763318e9791ed2be379cab"} Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.778242 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7b87-account-create-update-qrtvs"] Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.785613 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"8a6fa3f9-7dd5-47d9-8650-eb700dc18497","Type":"ContainerStarted","Data":"2a46ca4b77193f422bfd404303374c5f49178adae45428777d59862b24b06f13"} Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.786006 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "630adb3c-5213-4815-81d3-9cfd7948e790" (UID: "630adb3c-5213-4815-81d3-9cfd7948e790"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.802302 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.802359 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-config-data\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.802479 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.802509 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzpk6\" (UniqueName: \"kubernetes.io/projected/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-kube-api-access-xzpk6\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.802584 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-logs\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.802622 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-scripts\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.802666 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.802700 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.802796 4793 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.803310 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.805077 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-logs\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.807328 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.813920 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-scripts\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.815316 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.807958 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 27 20:27:55 crc kubenswrapper[4793]: W0127 20:27:55.822599 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8f214dfe_47ca_4d9c_804b_1672e923954f.slice/crio-8336fd9b6ef48e2183a88443f66174677678790d6e5692cf18f57c33213fa8b9 WatchSource:0}: Error finding container 8336fd9b6ef48e2183a88443f66174677678790d6e5692cf18f57c33213fa8b9: Status 404 returned error can't find the container with id 8336fd9b6ef48e2183a88443f66174677678790d6e5692cf18f57c33213fa8b9 Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.823921 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzpk6\" (UniqueName: \"kubernetes.io/projected/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-kube-api-access-xzpk6\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.826607 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.838997 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.844231 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c-config-data\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.853316 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.900406 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c\") " pod="openstack/glance-default-external-api-0" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.913225 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.977719 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ca404d2-1fe1-4c87-b5b5-aacc11a0525e" path="/var/lib/kubelet/pods/8ca404d2-1fe1-4c87-b5b5-aacc11a0525e/volumes" Jan 27 20:27:55 crc kubenswrapper[4793]: I0127 20:27:55.996963 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-b226-account-create-update-8gr8q"] Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.002439 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-9878-account-create-update-t2hhx"] Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.027119 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "630adb3c-5213-4815-81d3-9cfd7948e790" (UID: "630adb3c-5213-4815-81d3-9cfd7948e790"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.086202 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/630adb3c-5213-4815-81d3-9cfd7948e790-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.096932 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.121084 4793 scope.go:117] "RemoveContainer" containerID="a01259b78e0c432076efad3fedb676023d94b82a4619bb4b946f66ddb34ca89d" Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.125036 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.179801 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=4.020513489 podStartE2EDuration="35.179771188s" podCreationTimestamp="2026-01-27 20:27:21 +0000 UTC" firstStartedPulling="2026-01-27 20:27:22.994203179 +0000 UTC m=+1468.384456335" lastFinishedPulling="2026-01-27 20:27:54.153460878 +0000 UTC m=+1499.543714034" observedRunningTime="2026-01-27 20:27:56.077168527 +0000 UTC m=+1501.467421693" watchObservedRunningTime="2026-01-27 20:27:56.179771188 +0000 UTC m=+1501.570024344" Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.403389 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5c88b98b88-ttglp"] Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.436715 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5c88b98b88-ttglp"] Jan 27 20:27:56 crc kubenswrapper[4793]: E0127 20:27:56.639129 4793 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod630adb3c_5213_4815_81d3_9cfd7948e790.slice/crio-56c4cb6f6198ece6e54fb7ff743d6cad8ff47169e1f6cee07ef5568ce1dbf3e9\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod630adb3c_5213_4815_81d3_9cfd7948e790.slice\": RecentStats: unable to find data in memory cache]" Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.835080 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9878-account-create-update-t2hhx" event={"ID":"58fcdaca-8f5a-4bac-8b2f-754e27d164c0","Type":"ContainerStarted","Data":"1b4ed18fe12fbfaf97b29cb2313f8c79eaf86a20544b6f46665edbe435410816"} Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.835906 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9878-account-create-update-t2hhx" event={"ID":"58fcdaca-8f5a-4bac-8b2f-754e27d164c0","Type":"ContainerStarted","Data":"0be4922c17bc286b9912e678ab1404892a6e552ac9dcaab49c26bcb3aafc7fdf"} Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.840579 4793 generic.go:334] "Generic (PLEG): container finished" podID="c3a94a7d-379f-4d4d-8728-3b1509189b93" containerID="723fc037554f7a2646d72bd56724f57cd2b241c875a3c28b6df0a1e802ef2a8f" exitCode=0 Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.841062 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-wfkh5" event={"ID":"c3a94a7d-379f-4d4d-8728-3b1509189b93","Type":"ContainerDied","Data":"723fc037554f7a2646d72bd56724f57cd2b241c875a3c28b6df0a1e802ef2a8f"} Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.863416 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6f293f00-54d9-41da-ab08-efff8919f8b5","Type":"ContainerStarted","Data":"15397e97255b54045e59c762978b58b5b4a0c150fe5b3b5b8f7bc5334b6c9bcd"} Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.867971 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-9878-account-create-update-t2hhx" podStartSLOduration=2.867950071 podStartE2EDuration="2.867950071s" podCreationTimestamp="2026-01-27 20:27:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-27 20:27:56.86298637 +0000 UTC m=+1502.253239526" watchObservedRunningTime="2026-01-27 20:27:56.867950071 +0000 UTC m=+1502.258203227" Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.881204 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-b226-account-create-update-8gr8q" event={"ID":"8f214dfe-47ca-4d9c-804b-1672e923954f","Type":"ContainerStarted","Data":"bebbcdb2a19d935441d6ccfb6fc1c2ed85c30df8f41ee92b1a592e2bdd9eb3a9"} Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.881698 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-b226-account-create-update-8gr8q" event={"ID":"8f214dfe-47ca-4d9c-804b-1672e923954f","Type":"ContainerStarted","Data":"8336fd9b6ef48e2183a88443f66174677678790d6e5692cf18f57c33213fa8b9"} Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.893229 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"e65fb7be-afef-4a68-b5a9-e772125ee668","Type":"ContainerStarted","Data":"228666cedd3e36909a4604b94eef1c3af325169fe247da0ea690913fe44cd787"} Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.904806 4793 generic.go:334] "Generic (PLEG): container finished" podID="b87e83c5-3906-4d5e-ae19-58ed6148d219" containerID="9083285707e22bb5f6df9bdd6e46984b96a26404f89026386e8eaea6047f945f" exitCode=0 Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.905072 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-nmlw2" event={"ID":"b87e83c5-3906-4d5e-ae19-58ed6148d219","Type":"ContainerDied","Data":"9083285707e22bb5f6df9bdd6e46984b96a26404f89026386e8eaea6047f945f"} Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.917787 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7b87-account-create-update-qrtvs" event={"ID":"607b194b-5aaa-4b00-92f1-3448913e04f5","Type":"ContainerStarted","Data":"1e9e2fc7de5e6103d89879152339fba2c05a835aa8141018c916389dadb8cf03"} Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.917853 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7b87-account-create-update-qrtvs" event={"ID":"607b194b-5aaa-4b00-92f1-3448913e04f5","Type":"ContainerStarted","Data":"5b7b210eaaeeccca4884b16ae5f6b6847540cbbbc0ec49d0007e5f4897d497c9"} Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.932193 4793 generic.go:334] "Generic (PLEG): container finished" podID="cdaa4741-780c-464b-80cc-64eeaf8607de" containerID="36d7d084b7b3f438480744f9d357cbf378f30ef65bc8f0af760c5c7d8163c381" exitCode=0 Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.932244 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-7g4f4" event={"ID":"cdaa4741-780c-464b-80cc-64eeaf8607de","Type":"ContainerDied","Data":"36d7d084b7b3f438480744f9d357cbf378f30ef65bc8f0af760c5c7d8163c381"} Jan 27 20:27:56 crc kubenswrapper[4793]: I0127 20:27:56.932279 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-7g4f4" event={"ID":"cdaa4741-780c-464b-80cc-64eeaf8607de","Type":"ContainerStarted","Data":"f78c14f3020b2cdb35b16e361178c0cf0910536e9405728e8214e5d180705f1b"} Jan 27 20:27:57 crc kubenswrapper[4793]: I0127 20:27:57.070629 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-b226-account-create-update-8gr8q" podStartSLOduration=4.061522779 podStartE2EDuration="4.061522779s" podCreationTimestamp="2026-01-27 20:27:53 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:27:56.918530495 +0000 UTC m=+1502.308783671" watchObservedRunningTime="2026-01-27 20:27:57.061522779 +0000 UTC m=+1502.451775935" Jan 27 20:27:57 crc kubenswrapper[4793]: I0127 20:27:57.114640 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-7b87-account-create-update-qrtvs" podStartSLOduration=4.114613124 podStartE2EDuration="4.114613124s" podCreationTimestamp="2026-01-27 20:27:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:27:56.946612195 +0000 UTC m=+1502.336865351" watchObservedRunningTime="2026-01-27 20:27:57.114613124 +0000 UTC m=+1502.504866280" Jan 27 20:27:57 crc kubenswrapper[4793]: W0127 20:27:57.155035 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51d1e06b_2f11_4cf6_ba2c_ebdb0d2e1c0c.slice/crio-635f7b3501b5a3a02b1f69117df64c99f9fb70c05d0d7f5bfeb0ef4145ec9560 WatchSource:0}: Error finding container 635f7b3501b5a3a02b1f69117df64c99f9fb70c05d0d7f5bfeb0ef4145ec9560: Status 404 returned error can't find the container with id 635f7b3501b5a3a02b1f69117df64c99f9fb70c05d0d7f5bfeb0ef4145ec9560 Jan 27 20:27:57 crc kubenswrapper[4793]: I0127 20:27:57.171424 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 20:27:57 crc kubenswrapper[4793]: I0127 20:27:57.826941 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="630adb3c-5213-4815-81d3-9cfd7948e790" path="/var/lib/kubelet/pods/630adb3c-5213-4815-81d3-9cfd7948e790/volumes" Jan 27 20:27:57 crc kubenswrapper[4793]: I0127 20:27:57.946213 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"8a6fa3f9-7dd5-47d9-8650-eb700dc18497","Type":"ContainerStarted","Data":"a90e500ce724e8fa3f3f72964d4e0c92c75209b503e77918596a9c9314e9d574"} Jan 27 20:27:57 crc kubenswrapper[4793]: I0127 20:27:57.957161 4793 generic.go:334] "Generic (PLEG): container finished" podID="607b194b-5aaa-4b00-92f1-3448913e04f5" containerID="1e9e2fc7de5e6103d89879152339fba2c05a835aa8141018c916389dadb8cf03" exitCode=0 Jan 27 20:27:57 crc kubenswrapper[4793]: I0127 20:27:57.957420 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7b87-account-create-update-qrtvs" event={"ID":"607b194b-5aaa-4b00-92f1-3448913e04f5","Type":"ContainerDied","Data":"1e9e2fc7de5e6103d89879152339fba2c05a835aa8141018c916389dadb8cf03"} Jan 27 20:27:57 crc kubenswrapper[4793]: I0127 20:27:57.973490 4793 generic.go:334] "Generic (PLEG): container finished" podID="58fcdaca-8f5a-4bac-8b2f-754e27d164c0" containerID="1b4ed18fe12fbfaf97b29cb2313f8c79eaf86a20544b6f46665edbe435410816" exitCode=0 Jan 27 20:27:57 crc kubenswrapper[4793]: I0127 20:27:57.973603 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9878-account-create-update-t2hhx" event={"ID":"58fcdaca-8f5a-4bac-8b2f-754e27d164c0","Type":"ContainerDied","Data":"1b4ed18fe12fbfaf97b29cb2313f8c79eaf86a20544b6f46665edbe435410816"} Jan 27 20:27:57 crc kubenswrapper[4793]: I0127 20:27:57.986754 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"6f293f00-54d9-41da-ab08-efff8919f8b5","Type":"ContainerStarted","Data":"656dc3c10c5825556c9fa10b19d415612f17ed75fa91b8e593ed5e0a45204263"} Jan 27 20:27:57 crc kubenswrapper[4793]: I0127 20:27:57.993214 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c","Type":"ContainerStarted","Data":"635f7b3501b5a3a02b1f69117df64c99f9fb70c05d0d7f5bfeb0ef4145ec9560"} Jan 27 20:27:57 crc kubenswrapper[4793]: I0127 20:27:57.999118 4793 generic.go:334] "Generic (PLEG): container finished" podID="8f214dfe-47ca-4d9c-804b-1672e923954f" containerID="bebbcdb2a19d935441d6ccfb6fc1c2ed85c30df8f41ee92b1a592e2bdd9eb3a9" exitCode=0 Jan 27 20:27:57 crc kubenswrapper[4793]: I0127 20:27:57.999179 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-b226-account-create-update-8gr8q" event={"ID":"8f214dfe-47ca-4d9c-804b-1672e923954f","Type":"ContainerDied","Data":"bebbcdb2a19d935441d6ccfb6fc1c2ed85c30df8f41ee92b1a592e2bdd9eb3a9"} Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.057851 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.058672 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.059092 4793 scope.go:117] "RemoveContainer" containerID="8fc61af3848de745688268804290928f223177c35ea49f4f7841a974d950a45b" Jan 27 20:27:58 crc kubenswrapper[4793]: E0127 20:27:58.059458 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 40s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(ffc46614-5f6d-40ad-a388-1ff326d22ee6)\"" pod="openstack/watcher-decision-engine-0" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.411915 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-nmlw2" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.514752 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-wfkh5" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.526430 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-7g4f4" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.542060 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b87e83c5-3906-4d5e-ae19-58ed6148d219-operator-scripts\") pod \"b87e83c5-3906-4d5e-ae19-58ed6148d219\" (UID: \"b87e83c5-3906-4d5e-ae19-58ed6148d219\") " Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.542101 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3a94a7d-379f-4d4d-8728-3b1509189b93-operator-scripts\") pod \"c3a94a7d-379f-4d4d-8728-3b1509189b93\" (UID: \"c3a94a7d-379f-4d4d-8728-3b1509189b93\") " Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.542117 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdaa4741-780c-464b-80cc-64eeaf8607de-operator-scripts\") pod \"cdaa4741-780c-464b-80cc-64eeaf8607de\" (UID: \"cdaa4741-780c-464b-80cc-64eeaf8607de\") " Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.542157 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-88v25\" (UniqueName: \"kubernetes.io/projected/cdaa4741-780c-464b-80cc-64eeaf8607de-kube-api-access-88v25\") pod \"cdaa4741-780c-464b-80cc-64eeaf8607de\" (UID: \"cdaa4741-780c-464b-80cc-64eeaf8607de\") " Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.542205 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pk9wd\" (UniqueName: \"kubernetes.io/projected/b87e83c5-3906-4d5e-ae19-58ed6148d219-kube-api-access-pk9wd\") pod \"b87e83c5-3906-4d5e-ae19-58ed6148d219\" (UID: \"b87e83c5-3906-4d5e-ae19-58ed6148d219\") " Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.542225 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pz7gj\" (UniqueName: \"kubernetes.io/projected/c3a94a7d-379f-4d4d-8728-3b1509189b93-kube-api-access-pz7gj\") pod \"c3a94a7d-379f-4d4d-8728-3b1509189b93\" (UID: \"c3a94a7d-379f-4d4d-8728-3b1509189b93\") " Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.547501 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b87e83c5-3906-4d5e-ae19-58ed6148d219-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b87e83c5-3906-4d5e-ae19-58ed6148d219" (UID: "b87e83c5-3906-4d5e-ae19-58ed6148d219"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.547953 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c3a94a7d-379f-4d4d-8728-3b1509189b93-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c3a94a7d-379f-4d4d-8728-3b1509189b93" (UID: "c3a94a7d-379f-4d4d-8728-3b1509189b93"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.548420 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdaa4741-780c-464b-80cc-64eeaf8607de-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cdaa4741-780c-464b-80cc-64eeaf8607de" (UID: "cdaa4741-780c-464b-80cc-64eeaf8607de"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.563819 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3a94a7d-379f-4d4d-8728-3b1509189b93-kube-api-access-pz7gj" (OuterVolumeSpecName: "kube-api-access-pz7gj") pod "c3a94a7d-379f-4d4d-8728-3b1509189b93" (UID: "c3a94a7d-379f-4d4d-8728-3b1509189b93"). InnerVolumeSpecName "kube-api-access-pz7gj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.575742 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdaa4741-780c-464b-80cc-64eeaf8607de-kube-api-access-88v25" (OuterVolumeSpecName: "kube-api-access-88v25") pod "cdaa4741-780c-464b-80cc-64eeaf8607de" (UID: "cdaa4741-780c-464b-80cc-64eeaf8607de"). InnerVolumeSpecName "kube-api-access-88v25". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.606843 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b87e83c5-3906-4d5e-ae19-58ed6148d219-kube-api-access-pk9wd" (OuterVolumeSpecName: "kube-api-access-pk9wd") pod "b87e83c5-3906-4d5e-ae19-58ed6148d219" (UID: "b87e83c5-3906-4d5e-ae19-58ed6148d219"). InnerVolumeSpecName "kube-api-access-pk9wd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.654620 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-88v25\" (UniqueName: \"kubernetes.io/projected/cdaa4741-780c-464b-80cc-64eeaf8607de-kube-api-access-88v25\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.654651 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pk9wd\" (UniqueName: \"kubernetes.io/projected/b87e83c5-3906-4d5e-ae19-58ed6148d219-kube-api-access-pk9wd\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.654661 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pz7gj\" (UniqueName: \"kubernetes.io/projected/c3a94a7d-379f-4d4d-8728-3b1509189b93-kube-api-access-pz7gj\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.654672 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b87e83c5-3906-4d5e-ae19-58ed6148d219-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.654681 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3a94a7d-379f-4d4d-8728-3b1509189b93-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:58 crc kubenswrapper[4793]: I0127 20:27:58.654689 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdaa4741-780c-464b-80cc-64eeaf8607de-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.164230 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"8a6fa3f9-7dd5-47d9-8650-eb700dc18497","Type":"ContainerStarted","Data":"ee957e5762c592d0fd1030326d2eb1712de03431580091b8b4430b6c3dc6b196"} Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.190517 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-db-create-nmlw2" event={"ID":"b87e83c5-3906-4d5e-ae19-58ed6148d219","Type":"ContainerDied","Data":"4b31e473650d137bb8d55417148cb46c3d9a065abe70f97602dca401334f72b6"} Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.190576 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b31e473650d137bb8d55417148cb46c3d9a065abe70f97602dca401334f72b6" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.190647 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-nmlw2" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.199042 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=13.199020822 podStartE2EDuration="13.199020822s" podCreationTimestamp="2026-01-27 20:27:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:27:59.196684945 +0000 UTC m=+1504.586938101" watchObservedRunningTime="2026-01-27 20:27:59.199020822 +0000 UTC m=+1504.589273978" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.208156 4793 generic.go:334] "Generic (PLEG): container finished" podID="245a06eb-695c-4aca-ae98-93e59ca3ee86" containerID="670aa5e8d5be809bc290d7d8891e969f16e7c66d8312bb071336101b87296141" exitCode=137 Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.208279 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"245a06eb-695c-4aca-ae98-93e59ca3ee86","Type":"ContainerDied","Data":"670aa5e8d5be809bc290d7d8891e969f16e7c66d8312bb071336101b87296141"} Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.217586 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-wfkh5" event={"ID":"c3a94a7d-379f-4d4d-8728-3b1509189b93","Type":"ContainerDied","Data":"4ed26b4ff8265ec0e875f8795f9be2c4084a5644f4f09f3bcabd0640ad6954a3"} Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.217627 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed26b4ff8265ec0e875f8795f9be2c4084a5644f4f09f3bcabd0640ad6954a3" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.217738 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-wfkh5" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.226574 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-7g4f4" event={"ID":"cdaa4741-780c-464b-80cc-64eeaf8607de","Type":"ContainerDied","Data":"f78c14f3020b2cdb35b16e361178c0cf0910536e9405728e8214e5d180705f1b"} Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.226627 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f78c14f3020b2cdb35b16e361178c0cf0910536e9405728e8214e5d180705f1b" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.226736 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-7g4f4" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.233774 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c","Type":"ContainerStarted","Data":"e81585243e86a66c3bc5c1369180fdfc89d7c978f7ed7c3efbdbc107db5a6b94"} Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.235154 4793 scope.go:117] "RemoveContainer" containerID="8fc61af3848de745688268804290928f223177c35ea49f4f7841a974d950a45b" Jan 27 20:27:59 crc kubenswrapper[4793]: E0127 20:27:59.237053 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 40s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(ffc46614-5f6d-40ad-a388-1ff326d22ee6)\"" pod="openstack/watcher-decision-engine-0" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.524131 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.575220 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/245a06eb-695c-4aca-ae98-93e59ca3ee86-logs\") pod \"245a06eb-695c-4aca-ae98-93e59ca3ee86\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.575673 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-scripts\") pod \"245a06eb-695c-4aca-ae98-93e59ca3ee86\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.575936 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-config-data\") pod \"245a06eb-695c-4aca-ae98-93e59ca3ee86\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.576079 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/245a06eb-695c-4aca-ae98-93e59ca3ee86-etc-machine-id\") pod \"245a06eb-695c-4aca-ae98-93e59ca3ee86\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.576234 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-combined-ca-bundle\") pod \"245a06eb-695c-4aca-ae98-93e59ca3ee86\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.576470 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-config-data-custom\") pod \"245a06eb-695c-4aca-ae98-93e59ca3ee86\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.576608 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2248k\" (UniqueName: \"kubernetes.io/projected/245a06eb-695c-4aca-ae98-93e59ca3ee86-kube-api-access-2248k\") pod 
\"245a06eb-695c-4aca-ae98-93e59ca3ee86\" (UID: \"245a06eb-695c-4aca-ae98-93e59ca3ee86\") " Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.576818 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/245a06eb-695c-4aca-ae98-93e59ca3ee86-logs" (OuterVolumeSpecName: "logs") pod "245a06eb-695c-4aca-ae98-93e59ca3ee86" (UID: "245a06eb-695c-4aca-ae98-93e59ca3ee86"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.576893 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/245a06eb-695c-4aca-ae98-93e59ca3ee86-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "245a06eb-695c-4aca-ae98-93e59ca3ee86" (UID: "245a06eb-695c-4aca-ae98-93e59ca3ee86"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.577501 4793 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/245a06eb-695c-4aca-ae98-93e59ca3ee86-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.577642 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/245a06eb-695c-4aca-ae98-93e59ca3ee86-logs\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.593614 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-scripts" (OuterVolumeSpecName: "scripts") pod "245a06eb-695c-4aca-ae98-93e59ca3ee86" (UID: "245a06eb-695c-4aca-ae98-93e59ca3ee86"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.594246 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/245a06eb-695c-4aca-ae98-93e59ca3ee86-kube-api-access-2248k" (OuterVolumeSpecName: "kube-api-access-2248k") pod "245a06eb-695c-4aca-ae98-93e59ca3ee86" (UID: "245a06eb-695c-4aca-ae98-93e59ca3ee86"). InnerVolumeSpecName "kube-api-access-2248k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.609747 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "245a06eb-695c-4aca-ae98-93e59ca3ee86" (UID: "245a06eb-695c-4aca-ae98-93e59ca3ee86"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.679804 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2248k\" (UniqueName: \"kubernetes.io/projected/245a06eb-695c-4aca-ae98-93e59ca3ee86-kube-api-access-2248k\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.679839 4793 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.679849 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.680516 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "245a06eb-695c-4aca-ae98-93e59ca3ee86" (UID: "245a06eb-695c-4aca-ae98-93e59ca3ee86"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.778538 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-config-data" (OuterVolumeSpecName: "config-data") pod "245a06eb-695c-4aca-ae98-93e59ca3ee86" (UID: "245a06eb-695c-4aca-ae98-93e59ca3ee86"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.781933 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.781957 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/245a06eb-695c-4aca-ae98-93e59ca3ee86-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.804344 4793 scope.go:117] "RemoveContainer" containerID="7fb7b446c25d1ff79e3de54a1a928227db9260a423622219b9cb7ae4758d64dc" Jan 27 20:27:59 crc kubenswrapper[4793]: E0127 20:27:59.804640 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.908960 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-9878-account-create-update-t2hhx" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.937420 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-b226-account-create-update-8gr8q" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.948780 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-7b87-account-create-update-qrtvs" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.985261 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/58fcdaca-8f5a-4bac-8b2f-754e27d164c0-operator-scripts\") pod \"58fcdaca-8f5a-4bac-8b2f-754e27d164c0\" (UID: \"58fcdaca-8f5a-4bac-8b2f-754e27d164c0\") " Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.985334 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/607b194b-5aaa-4b00-92f1-3448913e04f5-operator-scripts\") pod \"607b194b-5aaa-4b00-92f1-3448913e04f5\" (UID: \"607b194b-5aaa-4b00-92f1-3448913e04f5\") " Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.985353 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrr5q\" (UniqueName: \"kubernetes.io/projected/8f214dfe-47ca-4d9c-804b-1672e923954f-kube-api-access-wrr5q\") pod \"8f214dfe-47ca-4d9c-804b-1672e923954f\" (UID: \"8f214dfe-47ca-4d9c-804b-1672e923954f\") " Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.985383 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p56mz\" (UniqueName: \"kubernetes.io/projected/58fcdaca-8f5a-4bac-8b2f-754e27d164c0-kube-api-access-p56mz\") pod \"58fcdaca-8f5a-4bac-8b2f-754e27d164c0\" (UID: \"58fcdaca-8f5a-4bac-8b2f-754e27d164c0\") " Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.985411 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4bv8n\" (UniqueName: \"kubernetes.io/projected/607b194b-5aaa-4b00-92f1-3448913e04f5-kube-api-access-4bv8n\") pod \"607b194b-5aaa-4b00-92f1-3448913e04f5\" (UID: \"607b194b-5aaa-4b00-92f1-3448913e04f5\") " Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.985484 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f214dfe-47ca-4d9c-804b-1672e923954f-operator-scripts\") pod \"8f214dfe-47ca-4d9c-804b-1672e923954f\" (UID: \"8f214dfe-47ca-4d9c-804b-1672e923954f\") " Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.986158 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58fcdaca-8f5a-4bac-8b2f-754e27d164c0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "58fcdaca-8f5a-4bac-8b2f-754e27d164c0" (UID: "58fcdaca-8f5a-4bac-8b2f-754e27d164c0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.986218 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f214dfe-47ca-4d9c-804b-1672e923954f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8f214dfe-47ca-4d9c-804b-1672e923954f" (UID: "8f214dfe-47ca-4d9c-804b-1672e923954f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.987099 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/607b194b-5aaa-4b00-92f1-3448913e04f5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "607b194b-5aaa-4b00-92f1-3448913e04f5" (UID: "607b194b-5aaa-4b00-92f1-3448913e04f5"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.990901 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f214dfe-47ca-4d9c-804b-1672e923954f-kube-api-access-wrr5q" (OuterVolumeSpecName: "kube-api-access-wrr5q") pod "8f214dfe-47ca-4d9c-804b-1672e923954f" (UID: "8f214dfe-47ca-4d9c-804b-1672e923954f"). InnerVolumeSpecName "kube-api-access-wrr5q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.992360 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58fcdaca-8f5a-4bac-8b2f-754e27d164c0-kube-api-access-p56mz" (OuterVolumeSpecName: "kube-api-access-p56mz") pod "58fcdaca-8f5a-4bac-8b2f-754e27d164c0" (UID: "58fcdaca-8f5a-4bac-8b2f-754e27d164c0"). InnerVolumeSpecName "kube-api-access-p56mz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:27:59 crc kubenswrapper[4793]: I0127 20:27:59.994181 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/607b194b-5aaa-4b00-92f1-3448913e04f5-kube-api-access-4bv8n" (OuterVolumeSpecName: "kube-api-access-4bv8n") pod "607b194b-5aaa-4b00-92f1-3448913e04f5" (UID: "607b194b-5aaa-4b00-92f1-3448913e04f5"). InnerVolumeSpecName "kube-api-access-4bv8n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.087616 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/58fcdaca-8f5a-4bac-8b2f-754e27d164c0-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.087905 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/607b194b-5aaa-4b00-92f1-3448913e04f5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.087915 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrr5q\" (UniqueName: \"kubernetes.io/projected/8f214dfe-47ca-4d9c-804b-1672e923954f-kube-api-access-wrr5q\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.087926 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p56mz\" (UniqueName: \"kubernetes.io/projected/58fcdaca-8f5a-4bac-8b2f-754e27d164c0-kube-api-access-p56mz\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.087942 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4bv8n\" (UniqueName: \"kubernetes.io/projected/607b194b-5aaa-4b00-92f1-3448913e04f5-kube-api-access-4bv8n\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.087954 4793 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f214dfe-47ca-4d9c-804b-1672e923954f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.250498 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7b87-account-create-update-qrtvs" event={"ID":"607b194b-5aaa-4b00-92f1-3448913e04f5","Type":"ContainerDied","Data":"5b7b210eaaeeccca4884b16ae5f6b6847540cbbbc0ec49d0007e5f4897d497c9"} Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.250593 4793 pod_container_deletor.go:80] "Container not found in 
pod's containers" containerID="5b7b210eaaeeccca4884b16ae5f6b6847540cbbbc0ec49d0007e5f4897d497c9" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.250516 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7b87-account-create-update-qrtvs" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.251714 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-9878-account-create-update-t2hhx" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.251707 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9878-account-create-update-t2hhx" event={"ID":"58fcdaca-8f5a-4bac-8b2f-754e27d164c0","Type":"ContainerDied","Data":"0be4922c17bc286b9912e678ab1404892a6e552ac9dcaab49c26bcb3aafc7fdf"} Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.251799 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0be4922c17bc286b9912e678ab1404892a6e552ac9dcaab49c26bcb3aafc7fdf" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.254502 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6f293f00-54d9-41da-ab08-efff8919f8b5","Type":"ContainerStarted","Data":"1a93c5a048a00be0bf0a32f64d094d957c9384ee641ca378ce75af993dfdcfcb"} Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.254610 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerName="ceilometer-central-agent" containerID="cri-o://1067b6df4301bd0f163ae99848469b3baca5992595763318e9791ed2be379cab" gracePeriod=30 Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.254697 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerName="ceilometer-notification-agent" containerID="cri-o://15397e97255b54045e59c762978b58b5b4a0c150fe5b3b5b8f7bc5334b6c9bcd" gracePeriod=30 Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.254697 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerName="sg-core" containerID="cri-o://656dc3c10c5825556c9fa10b19d415612f17ed75fa91b8e593ed5e0a45204263" gracePeriod=30 Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.254808 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerName="proxy-httpd" containerID="cri-o://1a93c5a048a00be0bf0a32f64d094d957c9384ee641ca378ce75af993dfdcfcb" gracePeriod=30 Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.254872 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.264111 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c","Type":"ContainerStarted","Data":"94bb175d7f90709d9cb9366a8d419cc459ee75ac5a176c36661c90c8c73d9f68"} Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.275245 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-b226-account-create-update-8gr8q" event={"ID":"8f214dfe-47ca-4d9c-804b-1672e923954f","Type":"ContainerDied","Data":"8336fd9b6ef48e2183a88443f66174677678790d6e5692cf18f57c33213fa8b9"} 
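The four "Killing container with a grace period" entries above each translate into one CRI stop per container of the ceilometer-0 pod with a 30-second timeout: the runtime delivers the container's stop signal, waits up to the grace period, then force-kills. A sketch of the equivalent direct CRI call; the CRI-O socket path is an assumed value for a CRC node and the container ID is just one taken from the entries above:

    package main

    import (
    	"context"
    	"log"
    	"time"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"
    	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
    )

    func main() {
    	// Assumed CRI-O socket path; adjust for your node.
    	conn, err := grpc.Dial("unix:///var/run/crio/crio.sock",
    		grpc.WithTransportCredentials(insecure.NewCredentials()))
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer conn.Close()

    	rt := runtimeapi.NewRuntimeServiceClient(conn)
    	ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
    	defer cancel()

    	// Mirrors the kubelet's graceful-kill step: the runtime signals the
    	// container and waits up to Timeout seconds (gracePeriod=30 above)
    	// before force-killing it.
    	_, err = rt.StopContainer(ctx, &runtimeapi.StopContainerRequest{
    		ContainerId: "1067b6df4301bd0f163ae99848469b3baca5992595763318e9791ed2be379cab",
    		Timeout:     30,
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }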
Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.275286 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-b226-account-create-update-8gr8q"
Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.275291 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8336fd9b6ef48e2183a88443f66174677678790d6e5692cf18f57c33213fa8b9"
Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.286800 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.286918 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"245a06eb-695c-4aca-ae98-93e59ca3ee86","Type":"ContainerDied","Data":"047dd9c9bdcb31aa56115831b6b590bcdb8046a7ea5b7aa26ab4555877472b7f"}
Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.287015 4793 scope.go:117] "RemoveContainer" containerID="670aa5e8d5be809bc290d7d8891e969f16e7c66d8312bb071336101b87296141"
Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.302625 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.302937 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="6c3db2fc-e883-483d-85fa-953061fd0f5a" containerName="glance-log" containerID="cri-o://1f5e058677b35ac471f327db9afd2e39f6c681c28374b80600c4b632f4c744cb" gracePeriod=30
Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.303152 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="6c3db2fc-e883-483d-85fa-953061fd0f5a" containerName="glance-httpd" containerID="cri-o://a4d64b64b9ed12660713d7b7e2bd1cba9c4ce5fe4df5f28ac3c4c100ed55171f" gracePeriod=30
Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.316474 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.99695106 podStartE2EDuration="15.316448855s" podCreationTimestamp="2026-01-27 20:27:45 +0000 UTC" firstStartedPulling="2026-01-27 20:27:46.517703775 +0000 UTC m=+1491.907956931" lastFinishedPulling="2026-01-27 20:27:58.83720157 +0000 UTC m=+1504.227454726" observedRunningTime="2026-01-27 20:28:00.282774667 +0000 UTC m=+1505.673027823" watchObservedRunningTime="2026-01-27 20:28:00.316448855 +0000 UTC m=+1505.706702011"
Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.331884 4793 scope.go:117] "RemoveContainer" containerID="b47f000ec28914b3c309b283c56a015d67e34250197653eb6e74fb47093b2dcb"
Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.357164 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.357139526 podStartE2EDuration="5.357139526s" podCreationTimestamp="2026-01-27 20:27:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:28:00.337250336 +0000 UTC m=+1505.727503492" watchObservedRunningTime="2026-01-27 20:28:00.357139526 +0000 UTC m=+1505.747392682"
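In the two "Observed pod startup duration" entries above, podStartSLOduration is the end-to-end startup time minus the image-pull window. For ceilometer-0: 15.316448855 - (1504.227454726 - 1491.907956931) = 2.99695106, exactly the logged value; glance-default-external-api-0 has zero-valued pull timestamps, so its SLO and E2E durations coincide. A check of that arithmetic in Go, using the monotonic (m=+...) offsets from the entry above:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// Monotonic offsets (the m=+... values) from the ceilometer-0 entry.
    	firstStartedPulling := 1491.907956931
    	lastFinishedPulling := 1504.227454726
    	podStartE2E := 15.316448855 // observedRunningTime - podCreationTimestamp

    	pulling := lastFinishedPulling - firstStartedPulling
    	slo := podStartE2E - pulling
    	fmt.Println(time.Duration(slo * float64(time.Second)))
    	// Prints ~2.99695106s, matching podStartSLOduration in the log.
    }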
"SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.447727 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 27 20:28:00 crc kubenswrapper[4793]: E0127 20:28:00.448522 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58fcdaca-8f5a-4bac-8b2f-754e27d164c0" containerName="mariadb-account-create-update" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.448623 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="58fcdaca-8f5a-4bac-8b2f-754e27d164c0" containerName="mariadb-account-create-update" Jan 27 20:28:00 crc kubenswrapper[4793]: E0127 20:28:00.448687 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f214dfe-47ca-4d9c-804b-1672e923954f" containerName="mariadb-account-create-update" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.448743 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f214dfe-47ca-4d9c-804b-1672e923954f" containerName="mariadb-account-create-update" Jan 27 20:28:00 crc kubenswrapper[4793]: E0127 20:28:00.448830 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3a94a7d-379f-4d4d-8728-3b1509189b93" containerName="mariadb-database-create" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.448885 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3a94a7d-379f-4d4d-8728-3b1509189b93" containerName="mariadb-database-create" Jan 27 20:28:00 crc kubenswrapper[4793]: E0127 20:28:00.448942 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b87e83c5-3906-4d5e-ae19-58ed6148d219" containerName="mariadb-database-create" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.448993 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b87e83c5-3906-4d5e-ae19-58ed6148d219" containerName="mariadb-database-create" Jan 27 20:28:00 crc kubenswrapper[4793]: E0127 20:28:00.449165 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="607b194b-5aaa-4b00-92f1-3448913e04f5" containerName="mariadb-account-create-update" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.449235 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="607b194b-5aaa-4b00-92f1-3448913e04f5" containerName="mariadb-account-create-update" Jan 27 20:28:00 crc kubenswrapper[4793]: E0127 20:28:00.449301 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="245a06eb-695c-4aca-ae98-93e59ca3ee86" containerName="cinder-api" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.449354 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="245a06eb-695c-4aca-ae98-93e59ca3ee86" containerName="cinder-api" Jan 27 20:28:00 crc kubenswrapper[4793]: E0127 20:28:00.449417 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdaa4741-780c-464b-80cc-64eeaf8607de" containerName="mariadb-database-create" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.449495 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdaa4741-780c-464b-80cc-64eeaf8607de" containerName="mariadb-database-create" Jan 27 20:28:00 crc kubenswrapper[4793]: E0127 20:28:00.449605 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="245a06eb-695c-4aca-ae98-93e59ca3ee86" containerName="cinder-api-log" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.449673 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="245a06eb-695c-4aca-ae98-93e59ca3ee86" containerName="cinder-api-log" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 
20:28:00.449960 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f214dfe-47ca-4d9c-804b-1672e923954f" containerName="mariadb-account-create-update" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.450069 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdaa4741-780c-464b-80cc-64eeaf8607de" containerName="mariadb-database-create" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.450173 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="b87e83c5-3906-4d5e-ae19-58ed6148d219" containerName="mariadb-database-create" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.450243 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="245a06eb-695c-4aca-ae98-93e59ca3ee86" containerName="cinder-api-log" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.450302 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3a94a7d-379f-4d4d-8728-3b1509189b93" containerName="mariadb-database-create" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.450375 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="607b194b-5aaa-4b00-92f1-3448913e04f5" containerName="mariadb-account-create-update" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.450454 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="58fcdaca-8f5a-4bac-8b2f-754e27d164c0" containerName="mariadb-account-create-update" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.450540 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="245a06eb-695c-4aca-ae98-93e59ca3ee86" containerName="cinder-api" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.451799 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.458212 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.458673 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.459308 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.468901 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.596724 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-config-data\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.598117 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-config-data-custom\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.598244 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/006b6058-ee89-4438-92d7-3c02b8136803-etc-machine-id\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " 
pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.598362 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.598479 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljbmf\" (UniqueName: \"kubernetes.io/projected/006b6058-ee89-4438-92d7-3c02b8136803-kube-api-access-ljbmf\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.598731 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-public-tls-certs\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.598895 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.599014 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/006b6058-ee89-4438-92d7-3c02b8136803-logs\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.599153 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-scripts\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.814109 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-public-tls-certs\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.814370 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.814492 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/006b6058-ee89-4438-92d7-3c02b8136803-logs\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.815185 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/006b6058-ee89-4438-92d7-3c02b8136803-logs\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.817474 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-scripts\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.817621 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-config-data\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.818220 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-config-data-custom\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.818263 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/006b6058-ee89-4438-92d7-3c02b8136803-etc-machine-id\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.818352 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.818429 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljbmf\" (UniqueName: \"kubernetes.io/projected/006b6058-ee89-4438-92d7-3c02b8136803-kube-api-access-ljbmf\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.822095 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/006b6058-ee89-4438-92d7-3c02b8136803-etc-machine-id\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.825155 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.829658 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-config-data\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.856728 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.857003 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-public-tls-certs\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.868240 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-scripts\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.870473 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/006b6058-ee89-4438-92d7-3c02b8136803-config-data-custom\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:00 crc kubenswrapper[4793]: I0127 20:28:00.894092 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljbmf\" (UniqueName: \"kubernetes.io/projected/006b6058-ee89-4438-92d7-3c02b8136803-kube-api-access-ljbmf\") pod \"cinder-api-0\" (UID: \"006b6058-ee89-4438-92d7-3c02b8136803\") " pod="openstack/cinder-api-0" Jan 27 20:28:01 crc kubenswrapper[4793]: I0127 20:28:01.093588 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 27 20:28:01 crc kubenswrapper[4793]: I0127 20:28:01.318063 4793 generic.go:334] "Generic (PLEG): container finished" podID="6c3db2fc-e883-483d-85fa-953061fd0f5a" containerID="1f5e058677b35ac471f327db9afd2e39f6c681c28374b80600c4b632f4c744cb" exitCode=143 Jan 27 20:28:01 crc kubenswrapper[4793]: I0127 20:28:01.318358 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c3db2fc-e883-483d-85fa-953061fd0f5a","Type":"ContainerDied","Data":"1f5e058677b35ac471f327db9afd2e39f6c681c28374b80600c4b632f4c744cb"} Jan 27 20:28:01 crc kubenswrapper[4793]: I0127 20:28:01.348783 4793 generic.go:334] "Generic (PLEG): container finished" podID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerID="1a93c5a048a00be0bf0a32f64d094d957c9384ee641ca378ce75af993dfdcfcb" exitCode=0 Jan 27 20:28:01 crc kubenswrapper[4793]: I0127 20:28:01.348822 4793 generic.go:334] "Generic (PLEG): container finished" podID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerID="656dc3c10c5825556c9fa10b19d415612f17ed75fa91b8e593ed5e0a45204263" exitCode=2 Jan 27 20:28:01 crc kubenswrapper[4793]: I0127 20:28:01.348830 4793 generic.go:334] "Generic (PLEG): container finished" podID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerID="15397e97255b54045e59c762978b58b5b4a0c150fe5b3b5b8f7bc5334b6c9bcd" exitCode=0 Jan 27 20:28:01 crc kubenswrapper[4793]: I0127 20:28:01.348896 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6f293f00-54d9-41da-ab08-efff8919f8b5","Type":"ContainerDied","Data":"1a93c5a048a00be0bf0a32f64d094d957c9384ee641ca378ce75af993dfdcfcb"} Jan 27 20:28:01 crc kubenswrapper[4793]: I0127 20:28:01.348923 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"6f293f00-54d9-41da-ab08-efff8919f8b5","Type":"ContainerDied","Data":"656dc3c10c5825556c9fa10b19d415612f17ed75fa91b8e593ed5e0a45204263"} Jan 27 20:28:01 crc kubenswrapper[4793]: I0127 20:28:01.348933 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6f293f00-54d9-41da-ab08-efff8919f8b5","Type":"ContainerDied","Data":"15397e97255b54045e59c762978b58b5b4a0c150fe5b3b5b8f7bc5334b6c9bcd"} Jan 27 20:28:01 crc kubenswrapper[4793]: I0127 20:28:01.735814 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 27 20:28:01 crc kubenswrapper[4793]: I0127 20:28:01.816278 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="245a06eb-695c-4aca-ae98-93e59ca3ee86" path="/var/lib/kubelet/pods/245a06eb-695c-4aca-ae98-93e59ca3ee86/volumes" Jan 27 20:28:01 crc kubenswrapper[4793]: I0127 20:28:01.829047 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 27 20:28:01 crc kubenswrapper[4793]: I0127 20:28:01.987952 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.389387 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"006b6058-ee89-4438-92d7-3c02b8136803","Type":"ContainerStarted","Data":"935f6781a7e9937f0abb9352efa653497f1271e79b8421c2e907ffaa6e8541af"} Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.393831 4793 generic.go:334] "Generic (PLEG): container finished" podID="6c3db2fc-e883-483d-85fa-953061fd0f5a" containerID="a4d64b64b9ed12660713d7b7e2bd1cba9c4ce5fe4df5f28ac3c4c100ed55171f" exitCode=0 Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.395261 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c3db2fc-e883-483d-85fa-953061fd0f5a","Type":"ContainerDied","Data":"a4d64b64b9ed12660713d7b7e2bd1cba9c4ce5fe4df5f28ac3c4c100ed55171f"} Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.767097 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.866897 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-scripts\") pod \"6c3db2fc-e883-483d-85fa-953061fd0f5a\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.866971 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c3db2fc-e883-483d-85fa-953061fd0f5a-httpd-run\") pod \"6c3db2fc-e883-483d-85fa-953061fd0f5a\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.867016 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c3db2fc-e883-483d-85fa-953061fd0f5a-logs\") pod \"6c3db2fc-e883-483d-85fa-953061fd0f5a\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.867148 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-config-data\") pod \"6c3db2fc-e883-483d-85fa-953061fd0f5a\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.867713 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-combined-ca-bundle\") pod \"6c3db2fc-e883-483d-85fa-953061fd0f5a\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.867783 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s975l\" (UniqueName: \"kubernetes.io/projected/6c3db2fc-e883-483d-85fa-953061fd0f5a-kube-api-access-s975l\") pod \"6c3db2fc-e883-483d-85fa-953061fd0f5a\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.867844 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"6c3db2fc-e883-483d-85fa-953061fd0f5a\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.867906 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-internal-tls-certs\") pod \"6c3db2fc-e883-483d-85fa-953061fd0f5a\" (UID: \"6c3db2fc-e883-483d-85fa-953061fd0f5a\") " Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.868524 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c3db2fc-e883-483d-85fa-953061fd0f5a-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "6c3db2fc-e883-483d-85fa-953061fd0f5a" (UID: "6c3db2fc-e883-483d-85fa-953061fd0f5a"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.869122 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c3db2fc-e883-483d-85fa-953061fd0f5a-logs" (OuterVolumeSpecName: "logs") pod "6c3db2fc-e883-483d-85fa-953061fd0f5a" (UID: "6c3db2fc-e883-483d-85fa-953061fd0f5a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.869566 4793 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c3db2fc-e883-483d-85fa-953061fd0f5a-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.869602 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c3db2fc-e883-483d-85fa-953061fd0f5a-logs\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.876613 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "6c3db2fc-e883-483d-85fa-953061fd0f5a" (UID: "6c3db2fc-e883-483d-85fa-953061fd0f5a"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.877562 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-scripts" (OuterVolumeSpecName: "scripts") pod "6c3db2fc-e883-483d-85fa-953061fd0f5a" (UID: "6c3db2fc-e883-483d-85fa-953061fd0f5a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.878798 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c3db2fc-e883-483d-85fa-953061fd0f5a-kube-api-access-s975l" (OuterVolumeSpecName: "kube-api-access-s975l") pod "6c3db2fc-e883-483d-85fa-953061fd0f5a" (UID: "6c3db2fc-e883-483d-85fa-953061fd0f5a"). InnerVolumeSpecName "kube-api-access-s975l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.916733 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c3db2fc-e883-483d-85fa-953061fd0f5a" (UID: "6c3db2fc-e883-483d-85fa-953061fd0f5a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.961137 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "6c3db2fc-e883-483d-85fa-953061fd0f5a" (UID: "6c3db2fc-e883-483d-85fa-953061fd0f5a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.961662 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-config-data" (OuterVolumeSpecName: "config-data") pod "6c3db2fc-e883-483d-85fa-953061fd0f5a" (UID: "6c3db2fc-e883-483d-85fa-953061fd0f5a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.972723 4793 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.972758 4793 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.972775 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.972787 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.972798 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3db2fc-e883-483d-85fa-953061fd0f5a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:02 crc kubenswrapper[4793]: I0127 20:28:02.972809 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s975l\" (UniqueName: \"kubernetes.io/projected/6c3db2fc-e883-483d-85fa-953061fd0f5a-kube-api-access-s975l\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.024187 4793 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.076945 4793 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.425006 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"006b6058-ee89-4438-92d7-3c02b8136803","Type":"ContainerStarted","Data":"cf1abbd04dd64db96595e75278c33421fd9da52af0ec10f256a6bf03ec77b8cf"} Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.428749 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c3db2fc-e883-483d-85fa-953061fd0f5a","Type":"ContainerDied","Data":"f90d358afc02ec79d00e0fabaf969358fe97776a36cd11ab36ab0c974742ac66"} Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.428822 4793 scope.go:117] "RemoveContainer" containerID="a4d64b64b9ed12660713d7b7e2bd1cba9c4ce5fe4df5f28ac3c4c100ed55171f" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.429120 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.490125 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.505182 4793 scope.go:117] "RemoveContainer" containerID="1f5e058677b35ac471f327db9afd2e39f6c681c28374b80600c4b632f4c744cb" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.528735 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.572572 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 20:28:03 crc kubenswrapper[4793]: E0127 20:28:03.573984 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c3db2fc-e883-483d-85fa-953061fd0f5a" containerName="glance-log" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.574013 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c3db2fc-e883-483d-85fa-953061fd0f5a" containerName="glance-log" Jan 27 20:28:03 crc kubenswrapper[4793]: E0127 20:28:03.574033 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c3db2fc-e883-483d-85fa-953061fd0f5a" containerName="glance-httpd" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.574040 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c3db2fc-e883-483d-85fa-953061fd0f5a" containerName="glance-httpd" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.575948 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c3db2fc-e883-483d-85fa-953061fd0f5a" containerName="glance-httpd" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.576070 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c3db2fc-e883-483d-85fa-953061fd0f5a" containerName="glance-log" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.578668 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.583042 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.583213 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.605220 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.715417 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.715481 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf13be98-0b5a-4b5b-8b85-696c9c35101d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.715525 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf13be98-0b5a-4b5b-8b85-696c9c35101d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.715605 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf13be98-0b5a-4b5b-8b85-696c9c35101d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.715640 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6jt2\" (UniqueName: \"kubernetes.io/projected/bf13be98-0b5a-4b5b-8b85-696c9c35101d-kube-api-access-w6jt2\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.715666 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf13be98-0b5a-4b5b-8b85-696c9c35101d-logs\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.715764 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf13be98-0b5a-4b5b-8b85-696c9c35101d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.715792 4793 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bf13be98-0b5a-4b5b-8b85-696c9c35101d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.817655 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf13be98-0b5a-4b5b-8b85-696c9c35101d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.818051 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bf13be98-0b5a-4b5b-8b85-696c9c35101d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.818134 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.818159 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf13be98-0b5a-4b5b-8b85-696c9c35101d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.818194 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf13be98-0b5a-4b5b-8b85-696c9c35101d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.818233 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf13be98-0b5a-4b5b-8b85-696c9c35101d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.818262 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6jt2\" (UniqueName: \"kubernetes.io/projected/bf13be98-0b5a-4b5b-8b85-696c9c35101d-kube-api-access-w6jt2\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.818284 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf13be98-0b5a-4b5b-8b85-696c9c35101d-logs\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.819700 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.819783 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf13be98-0b5a-4b5b-8b85-696c9c35101d-logs\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.820658 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bf13be98-0b5a-4b5b-8b85-696c9c35101d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.827862 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf13be98-0b5a-4b5b-8b85-696c9c35101d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.829167 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf13be98-0b5a-4b5b-8b85-696c9c35101d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.829593 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf13be98-0b5a-4b5b-8b85-696c9c35101d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.831360 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf13be98-0b5a-4b5b-8b85-696c9c35101d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.837576 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c3db2fc-e883-483d-85fa-953061fd0f5a" path="/var/lib/kubelet/pods/6c3db2fc-e883-483d-85fa-953061fd0f5a/volumes" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.839899 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6jt2\" (UniqueName: \"kubernetes.io/projected/bf13be98-0b5a-4b5b-8b85-696c9c35101d-kube-api-access-w6jt2\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.865490 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"bf13be98-0b5a-4b5b-8b85-696c9c35101d\") " pod="openstack/glance-default-internal-api-0" Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 
20:28:03.970841 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 27 20:28:03 crc kubenswrapper[4793]: I0127 20:28:03.993224 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.035187 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-combined-ca-bundle\") pod \"6f293f00-54d9-41da-ab08-efff8919f8b5\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") "
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.035734 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcljm\" (UniqueName: \"kubernetes.io/projected/6f293f00-54d9-41da-ab08-efff8919f8b5-kube-api-access-xcljm\") pod \"6f293f00-54d9-41da-ab08-efff8919f8b5\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") "
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.035758 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-config-data\") pod \"6f293f00-54d9-41da-ab08-efff8919f8b5\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") "
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.035850 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-scripts\") pod \"6f293f00-54d9-41da-ab08-efff8919f8b5\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") "
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.035905 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-sg-core-conf-yaml\") pod \"6f293f00-54d9-41da-ab08-efff8919f8b5\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") "
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.035950 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6f293f00-54d9-41da-ab08-efff8919f8b5-run-httpd\") pod \"6f293f00-54d9-41da-ab08-efff8919f8b5\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") "
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.035994 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6f293f00-54d9-41da-ab08-efff8919f8b5-log-httpd\") pod \"6f293f00-54d9-41da-ab08-efff8919f8b5\" (UID: \"6f293f00-54d9-41da-ab08-efff8919f8b5\") "
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.037966 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f293f00-54d9-41da-ab08-efff8919f8b5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6f293f00-54d9-41da-ab08-efff8919f8b5" (UID: "6f293f00-54d9-41da-ab08-efff8919f8b5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.042807 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f293f00-54d9-41da-ab08-efff8919f8b5-kube-api-access-xcljm" (OuterVolumeSpecName: "kube-api-access-xcljm") pod "6f293f00-54d9-41da-ab08-efff8919f8b5" (UID: "6f293f00-54d9-41da-ab08-efff8919f8b5"). InnerVolumeSpecName "kube-api-access-xcljm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.052255 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-scripts" (OuterVolumeSpecName: "scripts") pod "6f293f00-54d9-41da-ab08-efff8919f8b5" (UID: "6f293f00-54d9-41da-ab08-efff8919f8b5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.052837 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f293f00-54d9-41da-ab08-efff8919f8b5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6f293f00-54d9-41da-ab08-efff8919f8b5" (UID: "6f293f00-54d9-41da-ab08-efff8919f8b5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.070842 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6f293f00-54d9-41da-ab08-efff8919f8b5" (UID: "6f293f00-54d9-41da-ab08-efff8919f8b5"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.302321 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-scripts\") on node \"crc\" DevicePath \"\""
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.304577 4793 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.304737 4793 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6f293f00-54d9-41da-ab08-efff8919f8b5-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.304904 4793 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6f293f00-54d9-41da-ab08-efff8919f8b5-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.304989 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcljm\" (UniqueName: \"kubernetes.io/projected/6f293f00-54d9-41da-ab08-efff8919f8b5-kube-api-access-xcljm\") on node \"crc\" DevicePath \"\""
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.358835 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6f293f00-54d9-41da-ab08-efff8919f8b5" (UID: "6f293f00-54d9-41da-ab08-efff8919f8b5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.419896 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.447884 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-config-data" (OuterVolumeSpecName: "config-data") pod "6f293f00-54d9-41da-ab08-efff8919f8b5" (UID: "6f293f00-54d9-41da-ab08-efff8919f8b5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.448199 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-kdwlw"]
Jan 27 20:28:04 crc kubenswrapper[4793]: E0127 20:28:04.448731 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerName="ceilometer-notification-agent"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.448750 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerName="ceilometer-notification-agent"
Jan 27 20:28:04 crc kubenswrapper[4793]: E0127 20:28:04.448791 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerName="ceilometer-central-agent"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.448798 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerName="ceilometer-central-agent"
Jan 27 20:28:04 crc kubenswrapper[4793]: E0127 20:28:04.448817 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerName="sg-core"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.448823 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerName="sg-core"
Jan 27 20:28:04 crc kubenswrapper[4793]: E0127 20:28:04.450268 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerName="proxy-httpd"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.450288 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerName="proxy-httpd"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.450580 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerName="proxy-httpd"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.450598 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerName="ceilometer-central-agent"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.450618 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerName="ceilometer-notification-agent"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.450634 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerName="sg-core"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.452846 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-kdwlw"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.456924 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-kmrnn"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.457996 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.458239 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.460569 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-kdwlw"]
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.508219 4793 generic.go:334] "Generic (PLEG): container finished" podID="6f293f00-54d9-41da-ab08-efff8919f8b5" containerID="1067b6df4301bd0f163ae99848469b3baca5992595763318e9791ed2be379cab" exitCode=0
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.508469 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6f293f00-54d9-41da-ab08-efff8919f8b5","Type":"ContainerDied","Data":"1067b6df4301bd0f163ae99848469b3baca5992595763318e9791ed2be379cab"}
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.508593 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6f293f00-54d9-41da-ab08-efff8919f8b5","Type":"ContainerDied","Data":"ba5306e162362475d7f5ebf5b86d1a615020c15ddb389e069e62e684c08ca716"}
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.508679 4793 scope.go:117] "RemoveContainer" containerID="1a93c5a048a00be0bf0a32f64d094d957c9384ee641ca378ce75af993dfdcfcb"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.508736 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.519783 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"006b6058-ee89-4438-92d7-3c02b8136803","Type":"ContainerStarted","Data":"85fdce201e557e5b356e5d53718367e5e744d8826a547be45f89ea97faa62374"}
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.522043 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.527290 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-scripts\") pod \"nova-cell0-conductor-db-sync-kdwlw\" (UID: \"599d34c5-5606-4125-865c-ff142d5fce8d\") " pod="openstack/nova-cell0-conductor-db-sync-kdwlw"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.527342 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m54nh\" (UniqueName: \"kubernetes.io/projected/599d34c5-5606-4125-865c-ff142d5fce8d-kube-api-access-m54nh\") pod \"nova-cell0-conductor-db-sync-kdwlw\" (UID: \"599d34c5-5606-4125-865c-ff142d5fce8d\") " pod="openstack/nova-cell0-conductor-db-sync-kdwlw"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.527523 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-config-data\") pod \"nova-cell0-conductor-db-sync-kdwlw\" (UID: \"599d34c5-5606-4125-865c-ff142d5fce8d\") " pod="openstack/nova-cell0-conductor-db-sync-kdwlw"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.527672 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-kdwlw\" (UID: \"599d34c5-5606-4125-865c-ff142d5fce8d\") " pod="openstack/nova-cell0-conductor-db-sync-kdwlw"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.527746 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f293f00-54d9-41da-ab08-efff8919f8b5-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.574430 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.574398972 podStartE2EDuration="4.574398972s" podCreationTimestamp="2026-01-27 20:28:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:28:04.560917651 +0000 UTC m=+1509.951170807" watchObservedRunningTime="2026-01-27 20:28:04.574398972 +0000 UTC m=+1509.964652128"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.588928 4793 scope.go:117] "RemoveContainer" containerID="656dc3c10c5825556c9fa10b19d415612f17ed75fa91b8e593ed5e0a45204263"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.614698 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.627838 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.628963 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-config-data\") pod \"nova-cell0-conductor-db-sync-kdwlw\" (UID: \"599d34c5-5606-4125-865c-ff142d5fce8d\") " pod="openstack/nova-cell0-conductor-db-sync-kdwlw"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.629047 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-kdwlw\" (UID: \"599d34c5-5606-4125-865c-ff142d5fce8d\") " pod="openstack/nova-cell0-conductor-db-sync-kdwlw"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.629102 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-scripts\") pod \"nova-cell0-conductor-db-sync-kdwlw\" (UID: \"599d34c5-5606-4125-865c-ff142d5fce8d\") " pod="openstack/nova-cell0-conductor-db-sync-kdwlw"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.629120 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m54nh\" (UniqueName: \"kubernetes.io/projected/599d34c5-5606-4125-865c-ff142d5fce8d-kube-api-access-m54nh\") pod \"nova-cell0-conductor-db-sync-kdwlw\" (UID: \"599d34c5-5606-4125-865c-ff142d5fce8d\") " pod="openstack/nova-cell0-conductor-db-sync-kdwlw"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.636786 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-kdwlw\" (UID: \"599d34c5-5606-4125-865c-ff142d5fce8d\") " pod="openstack/nova-cell0-conductor-db-sync-kdwlw"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.637093 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-config-data\") pod \"nova-cell0-conductor-db-sync-kdwlw\" (UID: \"599d34c5-5606-4125-865c-ff142d5fce8d\") " pod="openstack/nova-cell0-conductor-db-sync-kdwlw"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.638752 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-scripts\") pod \"nova-cell0-conductor-db-sync-kdwlw\" (UID: \"599d34c5-5606-4125-865c-ff142d5fce8d\") " pod="openstack/nova-cell0-conductor-db-sync-kdwlw"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.648833 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m54nh\" (UniqueName: \"kubernetes.io/projected/599d34c5-5606-4125-865c-ff142d5fce8d-kube-api-access-m54nh\") pod \"nova-cell0-conductor-db-sync-kdwlw\" (UID: \"599d34c5-5606-4125-865c-ff142d5fce8d\") " pod="openstack/nova-cell0-conductor-db-sync-kdwlw"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.649154 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.653132 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.656621 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.656989 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.669320 4793 scope.go:117] "RemoveContainer" containerID="15397e97255b54045e59c762978b58b5b4a0c150fe5b3b5b8f7bc5334b6c9bcd"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.673293 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.730927 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-config-data\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.731446 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-log-httpd\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.731658 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-run-httpd\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.731800 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.731921 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-scripts\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.732065 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.732194 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6k5b2\" (UniqueName: \"kubernetes.io/projected/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-kube-api-access-6k5b2\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.737709 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 27 20:28:04 crc kubenswrapper[4793]: W0127 20:28:04.746540 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbf13be98_0b5a_4b5b_8b85_696c9c35101d.slice/crio-e257c739c96de10578342d29464e6f9039557d60bb4f816b32d09d758a0471ca WatchSource:0}: Error finding container e257c739c96de10578342d29464e6f9039557d60bb4f816b32d09d758a0471ca: Status 404 returned error can't find the container with id e257c739c96de10578342d29464e6f9039557d60bb4f816b32d09d758a0471ca
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.809190 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-kdwlw"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.834068 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-scripts\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.834148 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.834202 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6k5b2\" (UniqueName: \"kubernetes.io/projected/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-kube-api-access-6k5b2\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.834254 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-config-data\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.834283 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-log-httpd\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.834348 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-run-httpd\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.834372 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.834949 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-log-httpd\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.835200 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-run-httpd\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.839271 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.839923 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-config-data\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.840397 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-scripts\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.841138 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.857757 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6k5b2\" (UniqueName: \"kubernetes.io/projected/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-kube-api-access-6k5b2\") pod \"ceilometer-0\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.922696 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 27 20:28:04 crc kubenswrapper[4793]: I0127 20:28:04.945919 4793 scope.go:117] "RemoveContainer" containerID="1067b6df4301bd0f163ae99848469b3baca5992595763318e9791ed2be379cab"
Jan 27 20:28:05 crc kubenswrapper[4793]: I0127 20:28:05.082248 4793 scope.go:117] "RemoveContainer" containerID="1a93c5a048a00be0bf0a32f64d094d957c9384ee641ca378ce75af993dfdcfcb"
Jan 27 20:28:05 crc kubenswrapper[4793]: E0127 20:28:05.083127 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a93c5a048a00be0bf0a32f64d094d957c9384ee641ca378ce75af993dfdcfcb\": container with ID starting with 1a93c5a048a00be0bf0a32f64d094d957c9384ee641ca378ce75af993dfdcfcb not found: ID does not exist" containerID="1a93c5a048a00be0bf0a32f64d094d957c9384ee641ca378ce75af993dfdcfcb"
Jan 27 20:28:05 crc kubenswrapper[4793]: I0127 20:28:05.083167 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a93c5a048a00be0bf0a32f64d094d957c9384ee641ca378ce75af993dfdcfcb"} err="failed to get container status \"1a93c5a048a00be0bf0a32f64d094d957c9384ee641ca378ce75af993dfdcfcb\": rpc error: code = NotFound desc = could not find container \"1a93c5a048a00be0bf0a32f64d094d957c9384ee641ca378ce75af993dfdcfcb\": container with ID starting with 1a93c5a048a00be0bf0a32f64d094d957c9384ee641ca378ce75af993dfdcfcb not found: ID does not exist"
Jan 27 20:28:05 crc kubenswrapper[4793]: I0127 20:28:05.083191 4793 scope.go:117] "RemoveContainer" containerID="656dc3c10c5825556c9fa10b19d415612f17ed75fa91b8e593ed5e0a45204263"
Jan 27 20:28:05 crc kubenswrapper[4793]: E0127 20:28:05.083579 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"656dc3c10c5825556c9fa10b19d415612f17ed75fa91b8e593ed5e0a45204263\": container with ID starting with 656dc3c10c5825556c9fa10b19d415612f17ed75fa91b8e593ed5e0a45204263 not found: ID does not exist" containerID="656dc3c10c5825556c9fa10b19d415612f17ed75fa91b8e593ed5e0a45204263"
Jan 27 20:28:05 crc kubenswrapper[4793]: I0127 20:28:05.083649 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"656dc3c10c5825556c9fa10b19d415612f17ed75fa91b8e593ed5e0a45204263"} err="failed to get container status \"656dc3c10c5825556c9fa10b19d415612f17ed75fa91b8e593ed5e0a45204263\": rpc error: code = NotFound desc = could not find container \"656dc3c10c5825556c9fa10b19d415612f17ed75fa91b8e593ed5e0a45204263\": container with ID starting with 656dc3c10c5825556c9fa10b19d415612f17ed75fa91b8e593ed5e0a45204263 not found: ID does not exist"
Jan 27 20:28:05 crc kubenswrapper[4793]: I0127 20:28:05.083703 4793 scope.go:117] "RemoveContainer" containerID="15397e97255b54045e59c762978b58b5b4a0c150fe5b3b5b8f7bc5334b6c9bcd"
Jan 27 20:28:05 crc kubenswrapper[4793]: E0127 20:28:05.099815 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15397e97255b54045e59c762978b58b5b4a0c150fe5b3b5b8f7bc5334b6c9bcd\": container with ID starting with 15397e97255b54045e59c762978b58b5b4a0c150fe5b3b5b8f7bc5334b6c9bcd not found: ID does not exist" containerID="15397e97255b54045e59c762978b58b5b4a0c150fe5b3b5b8f7bc5334b6c9bcd"
Jan 27 20:28:05 crc kubenswrapper[4793]: I0127 20:28:05.099865 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15397e97255b54045e59c762978b58b5b4a0c150fe5b3b5b8f7bc5334b6c9bcd"} err="failed to get container status \"15397e97255b54045e59c762978b58b5b4a0c150fe5b3b5b8f7bc5334b6c9bcd\": rpc error: code = NotFound desc = could not find container \"15397e97255b54045e59c762978b58b5b4a0c150fe5b3b5b8f7bc5334b6c9bcd\": container with ID starting with 15397e97255b54045e59c762978b58b5b4a0c150fe5b3b5b8f7bc5334b6c9bcd not found: ID does not exist"
Jan 27 20:28:05 crc kubenswrapper[4793]: I0127 20:28:05.099904 4793 scope.go:117] "RemoveContainer" containerID="1067b6df4301bd0f163ae99848469b3baca5992595763318e9791ed2be379cab"
Jan 27 20:28:05 crc kubenswrapper[4793]: E0127 20:28:05.105108 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1067b6df4301bd0f163ae99848469b3baca5992595763318e9791ed2be379cab\": container with ID starting with 1067b6df4301bd0f163ae99848469b3baca5992595763318e9791ed2be379cab not found: ID does not exist" containerID="1067b6df4301bd0f163ae99848469b3baca5992595763318e9791ed2be379cab"
Jan 27 20:28:05 crc kubenswrapper[4793]: I0127 20:28:05.105221 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1067b6df4301bd0f163ae99848469b3baca5992595763318e9791ed2be379cab"} err="failed to get container status \"1067b6df4301bd0f163ae99848469b3baca5992595763318e9791ed2be379cab\": rpc error: code = NotFound desc = could not find container \"1067b6df4301bd0f163ae99848469b3baca5992595763318e9791ed2be379cab\": container with ID starting with 1067b6df4301bd0f163ae99848469b3baca5992595763318e9791ed2be379cab not found: ID does not exist"
Jan 27 20:28:05 crc kubenswrapper[4793]: I0127 20:28:05.216655 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-kdwlw"]
Jan 27 20:28:05 crc kubenswrapper[4793]: I0127 20:28:05.556487 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"bf13be98-0b5a-4b5b-8b85-696c9c35101d","Type":"ContainerStarted","Data":"e257c739c96de10578342d29464e6f9039557d60bb4f816b32d09d758a0471ca"}
Jan 27 20:28:05 crc kubenswrapper[4793]: I0127 20:28:05.559114 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-kdwlw" event={"ID":"599d34c5-5606-4125-865c-ff142d5fce8d","Type":"ContainerStarted","Data":"f80b810311ced09a4974bd8d502595416cf4cf48a97ee60fdb675bf3d8fbae82"}
Jan 27 20:28:05 crc kubenswrapper[4793]: I0127 20:28:05.602036 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 20:28:05 crc kubenswrapper[4793]: I0127 20:28:05.823582 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f293f00-54d9-41da-ab08-efff8919f8b5" path="/var/lib/kubelet/pods/6f293f00-54d9-41da-ab08-efff8919f8b5/volumes"
Jan 27 20:28:06 crc kubenswrapper[4793]: I0127 20:28:06.127849 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Jan 27 20:28:06 crc kubenswrapper[4793]: I0127 20:28:06.127903 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Jan 27 20:28:06 crc kubenswrapper[4793]: I0127 20:28:06.189076 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Jan 27 20:28:06 crc kubenswrapper[4793]: I0127 20:28:06.203398 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Jan 27 20:28:06 crc kubenswrapper[4793]: I0127 20:28:06.572800 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"bf13be98-0b5a-4b5b-8b85-696c9c35101d","Type":"ContainerStarted","Data":"33fc25de5b49783e4e2e2ca1ace47f5e024b0aa84a1b397aa82654551e8c17ac"}
Jan 27 20:28:06 crc kubenswrapper[4793]: I0127 20:28:06.573187 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"bf13be98-0b5a-4b5b-8b85-696c9c35101d","Type":"ContainerStarted","Data":"679e39fb69eb51806ff1beb8e7e6c698ee12e4192c2be373fc1ab14547aa9222"}
Jan 27 20:28:06 crc kubenswrapper[4793]: I0127 20:28:06.578577 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"39ea7a6f-2069-4513-9d1d-b8f95ae619d3","Type":"ContainerStarted","Data":"d8474966c2d0acd4b9224731d471424be99dfd680f86a82e43f2662a3a059c1f"}
Jan 27 20:28:06 crc kubenswrapper[4793]: I0127 20:28:06.578640 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"39ea7a6f-2069-4513-9d1d-b8f95ae619d3","Type":"ContainerStarted","Data":"a72ad186481bcdbef59b21b3b72cb53534800cd94451703905e859382f8ac3f5"}
Jan 27 20:28:06 crc kubenswrapper[4793]: I0127 20:28:06.578664 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"39ea7a6f-2069-4513-9d1d-b8f95ae619d3","Type":"ContainerStarted","Data":"00d04e5e31e71ed6a0add0a4590c3c487abe0302dfcfb0630b2906a1649142aa"}
Jan 27 20:28:06 crc kubenswrapper[4793]: I0127 20:28:06.578686 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Jan 27 20:28:06 crc kubenswrapper[4793]: I0127 20:28:06.579037 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Jan 27 20:28:06 crc kubenswrapper[4793]: I0127 20:28:06.601568 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.601520642 podStartE2EDuration="3.601520642s" podCreationTimestamp="2026-01-27 20:28:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:28:06.596057758 +0000 UTC m=+1511.986310924" watchObservedRunningTime="2026-01-27 20:28:06.601520642 +0000 UTC m=+1511.991773808"
Jan 27 20:28:07 crc kubenswrapper[4793]: I0127 20:28:07.482604 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 20:28:07 crc kubenswrapper[4793]: I0127 20:28:07.641800 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"39ea7a6f-2069-4513-9d1d-b8f95ae619d3","Type":"ContainerStarted","Data":"4c6bf41d808408660b902a7302b5c82971454991eefa0fddb7c0b2c0868b932d"}
Jan 27 20:28:08 crc kubenswrapper[4793]: I0127 20:28:08.669344 4793 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 27 20:28:08 crc kubenswrapper[4793]: I0127 20:28:08.669809 4793 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 27 20:28:08 crc kubenswrapper[4793]: I0127 20:28:08.670334 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="proxy-httpd" containerID="cri-o://57ddea27c26fd277acfc2270877b9d7e587de2fc84465cd194cfc9dbae24d76e" gracePeriod=30
Jan 27 20:28:08 crc kubenswrapper[4793]: I0127 20:28:08.670459 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="sg-core" containerID="cri-o://4c6bf41d808408660b902a7302b5c82971454991eefa0fddb7c0b2c0868b932d" gracePeriod=30
Jan 27 20:28:08 crc kubenswrapper[4793]: I0127 20:28:08.670540 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="ceilometer-notification-agent" containerID="cri-o://d8474966c2d0acd4b9224731d471424be99dfd680f86a82e43f2662a3a059c1f" gracePeriod=30
Jan 27 20:28:08 crc kubenswrapper[4793]: I0127 20:28:08.670618 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"39ea7a6f-2069-4513-9d1d-b8f95ae619d3","Type":"ContainerStarted","Data":"57ddea27c26fd277acfc2270877b9d7e587de2fc84465cd194cfc9dbae24d76e"}
Jan 27 20:28:08 crc kubenswrapper[4793]: I0127 20:28:08.670648 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 27 20:28:08 crc kubenswrapper[4793]: I0127 20:28:08.669759 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="ceilometer-central-agent" containerID="cri-o://a72ad186481bcdbef59b21b3b72cb53534800cd94451703905e859382f8ac3f5" gracePeriod=30
Jan 27 20:28:08 crc kubenswrapper[4793]: I0127 20:28:08.707367 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.030088026 podStartE2EDuration="4.707340746s" podCreationTimestamp="2026-01-27 20:28:04 +0000 UTC" firstStartedPulling="2026-01-27 20:28:05.617264362 +0000 UTC m=+1511.007517638" lastFinishedPulling="2026-01-27 20:28:08.294517202 +0000 UTC m=+1513.684770358" observedRunningTime="2026-01-27 20:28:08.698419468 +0000 UTC m=+1514.088672624" watchObservedRunningTime="2026-01-27 20:28:08.707340746 +0000 UTC m=+1514.097593902"
Jan 27 20:28:08 crc kubenswrapper[4793]: I0127 20:28:08.959488 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Jan 27 20:28:09 crc kubenswrapper[4793]: I0127 20:28:09.031161 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Jan 27 20:28:09 crc kubenswrapper[4793]: I0127 20:28:09.690332 4793 generic.go:334] "Generic (PLEG): container finished" podID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerID="4c6bf41d808408660b902a7302b5c82971454991eefa0fddb7c0b2c0868b932d" exitCode=2
Jan 27 20:28:09 crc kubenswrapper[4793]: I0127 20:28:09.690369 4793 generic.go:334] "Generic (PLEG): container finished" podID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerID="d8474966c2d0acd4b9224731d471424be99dfd680f86a82e43f2662a3a059c1f" exitCode=0
Jan 27 20:28:09 crc kubenswrapper[4793]: I0127 20:28:09.690597 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"39ea7a6f-2069-4513-9d1d-b8f95ae619d3","Type":"ContainerDied","Data":"4c6bf41d808408660b902a7302b5c82971454991eefa0fddb7c0b2c0868b932d"}
Jan 27 20:28:09 crc kubenswrapper[4793]: I0127 20:28:09.691118 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"39ea7a6f-2069-4513-9d1d-b8f95ae619d3","Type":"ContainerDied","Data":"d8474966c2d0acd4b9224731d471424be99dfd680f86a82e43f2662a3a059c1f"}
Jan 27 20:28:11 crc kubenswrapper[4793]: I0127 20:28:11.808193 4793 scope.go:117] "RemoveContainer" containerID="7fb7b446c25d1ff79e3de54a1a928227db9260a423622219b9cb7ae4758d64dc"
Jan 27 20:28:11 crc kubenswrapper[4793]: E0127 20:28:11.810099 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:28:12 crc kubenswrapper[4793]: I0127 20:28:12.744160 4793 generic.go:334] "Generic (PLEG): container finished" podID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerID="a72ad186481bcdbef59b21b3b72cb53534800cd94451703905e859382f8ac3f5" exitCode=0
Jan 27 20:28:12 crc kubenswrapper[4793]: I0127 20:28:12.744208 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"39ea7a6f-2069-4513-9d1d-b8f95ae619d3","Type":"ContainerDied","Data":"a72ad186481bcdbef59b21b3b72cb53534800cd94451703905e859382f8ac3f5"}
Jan 27 20:28:12 crc kubenswrapper[4793]: I0127 20:28:12.803636 4793 scope.go:117] "RemoveContainer" containerID="8fc61af3848de745688268804290928f223177c35ea49f4f7841a974d950a45b"
Jan 27 20:28:12 crc kubenswrapper[4793]: E0127 20:28:12.804182 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 40s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(ffc46614-5f6d-40ad-a388-1ff326d22ee6)\"" pod="openstack/watcher-decision-engine-0" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6"
Jan 27 20:28:13 crc kubenswrapper[4793]: I0127 20:28:13.570613 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Jan 27 20:28:13 crc kubenswrapper[4793]: I0127 20:28:13.971913 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Jan 27 20:28:13 crc kubenswrapper[4793]: I0127 20:28:13.971966 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Jan 27 20:28:14 crc kubenswrapper[4793]: I0127 20:28:14.013325 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Jan 27 20:28:14 crc kubenswrapper[4793]: I0127 20:28:14.020767 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Jan 27 20:28:14 crc kubenswrapper[4793]: I0127 20:28:14.770601 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Jan 27 20:28:14 crc kubenswrapper[4793]: I0127 20:28:14.771400 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Jan 27 20:28:17 crc kubenswrapper[4793]: I0127 20:28:17.065976 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Jan 27 20:28:17 crc kubenswrapper[4793]: I0127 20:28:17.066355 4793 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 27 20:28:17 crc kubenswrapper[4793]: I0127 20:28:17.072410 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Jan 27 20:28:20 crc kubenswrapper[4793]: I0127 20:28:20.852638 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-kdwlw" event={"ID":"599d34c5-5606-4125-865c-ff142d5fce8d","Type":"ContainerStarted","Data":"c310c8f3cd5f9eccfaa63e08eeaf120a61177eb1d1629a9b4b97725acdc11bcb"}
Jan 27 20:28:20 crc kubenswrapper[4793]: I0127 20:28:20.869493 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-kdwlw" podStartSLOduration=2.375092018 podStartE2EDuration="16.869474286s" podCreationTimestamp="2026-01-27 20:28:04 +0000 UTC" firstStartedPulling="2026-01-27 20:28:05.223523305 +0000 UTC m=+1510.613776461" lastFinishedPulling="2026-01-27 20:28:19.717905573 +0000 UTC m=+1525.108158729" observedRunningTime="2026-01-27 20:28:20.868353328 +0000 UTC m=+1526.258606494" watchObservedRunningTime="2026-01-27 20:28:20.869474286 +0000 UTC m=+1526.259727432"
Jan 27 20:28:24 crc kubenswrapper[4793]: I0127 20:28:24.804058 4793 scope.go:117] "RemoveContainer" containerID="7fb7b446c25d1ff79e3de54a1a928227db9260a423622219b9cb7ae4758d64dc"
Jan 27 20:28:25 crc kubenswrapper[4793]: I0127 20:28:25.809765 4793 scope.go:117] "RemoveContainer" containerID="8fc61af3848de745688268804290928f223177c35ea49f4f7841a974d950a45b"
Jan 27 20:28:25 crc kubenswrapper[4793]: I0127 20:28:25.901376 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e"}
Jan 27 20:28:26 crc kubenswrapper[4793]: I0127 20:28:26.912876 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"ffc46614-5f6d-40ad-a388-1ff326d22ee6","Type":"ContainerStarted","Data":"b7064c87a3f192cde4ead92e54cb033d42bc8dcfceedc9a062d08cf4e08281fd"}
Jan 27 20:28:28 crc kubenswrapper[4793]: I0127 20:28:28.056437 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:28 crc kubenswrapper[4793]: I0127 20:28:28.056849 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:28 crc kubenswrapper[4793]: I0127 20:28:28.087269 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:28 crc kubenswrapper[4793]: I0127 20:28:28.242520 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 20:28:28 crc kubenswrapper[4793]: I0127 20:28:28.242625 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 27 20:28:28 crc kubenswrapper[4793]: E0127 20:28:28.244007 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e is running failed: container process not found" containerID="97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"]
Jan 27 20:28:28 crc kubenswrapper[4793]: E0127 20:28:28.244373 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e is running failed: container process not found" containerID="97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"]
Jan 27 20:28:28 crc kubenswrapper[4793]: E0127 20:28:28.244777 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e is running failed: container process not found" containerID="97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"]
Jan 27 20:28:28 crc kubenswrapper[4793]: E0127 20:28:28.244820 4793 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e is running failed: container process not found" probeType="Startup" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerName="watcher-applier"
Jan 27 20:28:28 crc kubenswrapper[4793]: I0127 20:28:28.935248 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e" exitCode=1
Jan 27 20:28:28 crc kubenswrapper[4793]: I0127 20:28:28.935442 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e"}
Jan 27 20:28:28 crc kubenswrapper[4793]: I0127 20:28:28.935640 4793 scope.go:117] "RemoveContainer" containerID="7fb7b446c25d1ff79e3de54a1a928227db9260a423622219b9cb7ae4758d64dc"
Jan 27 20:28:28 crc kubenswrapper[4793]: I0127 20:28:28.936294 4793 scope.go:117] "RemoveContainer" containerID="97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e"
Jan 27 20:28:28 crc kubenswrapper[4793]: E0127 20:28:28.936567 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:28:28 crc kubenswrapper[4793]: I0127 20:28:28.984001 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:29 crc kubenswrapper[4793]: I0127 20:28:29.105112 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"]
Jan 27 20:28:30 crc kubenswrapper[4793]: I0127 20:28:30.956854 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-decision-engine-0" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerName="watcher-decision-engine" containerID="cri-o://b7064c87a3f192cde4ead92e54cb033d42bc8dcfceedc9a062d08cf4e08281fd" gracePeriod=30
Jan 27 20:28:32 crc kubenswrapper[4793]: I0127 20:28:32.981989 4793 generic.go:334] "Generic (PLEG): container finished" podID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerID="b7064c87a3f192cde4ead92e54cb033d42bc8dcfceedc9a062d08cf4e08281fd" exitCode=0
Jan 27 20:28:32 crc kubenswrapper[4793]: I0127 20:28:32.982084 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"ffc46614-5f6d-40ad-a388-1ff326d22ee6","Type":"ContainerDied","Data":"b7064c87a3f192cde4ead92e54cb033d42bc8dcfceedc9a062d08cf4e08281fd"}
Jan 27 20:28:32 crc kubenswrapper[4793]: I0127 20:28:32.982372 4793 scope.go:117] "RemoveContainer" containerID="8fc61af3848de745688268804290928f223177c35ea49f4f7841a974d950a45b"
Jan 27 20:28:33 crc kubenswrapper[4793]: I0127 20:28:33.207643 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:33 crc kubenswrapper[4793]: I0127 20:28:33.298872 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzw87\" (UniqueName: \"kubernetes.io/projected/ffc46614-5f6d-40ad-a388-1ff326d22ee6-kube-api-access-tzw87\") pod \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") "
Jan 27 20:28:33 crc kubenswrapper[4793]: I0127 20:28:33.298950 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ffc46614-5f6d-40ad-a388-1ff326d22ee6-logs\") pod \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") "
Jan 27 20:28:33 crc kubenswrapper[4793]: I0127 20:28:33.299010 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-combined-ca-bundle\") pod \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") "
Jan 27 20:28:33 crc kubenswrapper[4793]: I0127 20:28:33.299180 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-config-data\") pod \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") "
Jan 27 20:28:33 crc kubenswrapper[4793]: I0127 20:28:33.299204 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-custom-prometheus-ca\") pod \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\" (UID: \"ffc46614-5f6d-40ad-a388-1ff326d22ee6\") "
Jan 27 20:28:33 crc kubenswrapper[4793]: I0127 20:28:33.299486 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffc46614-5f6d-40ad-a388-1ff326d22ee6-logs" (OuterVolumeSpecName: "logs") pod "ffc46614-5f6d-40ad-a388-1ff326d22ee6" (UID: "ffc46614-5f6d-40ad-a388-1ff326d22ee6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:28:33 crc kubenswrapper[4793]: I0127 20:28:33.299897 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ffc46614-5f6d-40ad-a388-1ff326d22ee6-logs\") on node \"crc\" DevicePath \"\""
Jan 27 20:28:33 crc kubenswrapper[4793]: I0127 20:28:33.305153 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffc46614-5f6d-40ad-a388-1ff326d22ee6-kube-api-access-tzw87" (OuterVolumeSpecName: "kube-api-access-tzw87") pod "ffc46614-5f6d-40ad-a388-1ff326d22ee6" (UID: "ffc46614-5f6d-40ad-a388-1ff326d22ee6"). InnerVolumeSpecName "kube-api-access-tzw87". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:28:33 crc kubenswrapper[4793]: I0127 20:28:33.334822 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ffc46614-5f6d-40ad-a388-1ff326d22ee6" (UID: "ffc46614-5f6d-40ad-a388-1ff326d22ee6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:28:33 crc kubenswrapper[4793]: I0127 20:28:33.343968 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "ffc46614-5f6d-40ad-a388-1ff326d22ee6" (UID: "ffc46614-5f6d-40ad-a388-1ff326d22ee6"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:28:33 crc kubenswrapper[4793]: I0127 20:28:33.362018 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-config-data" (OuterVolumeSpecName: "config-data") pod "ffc46614-5f6d-40ad-a388-1ff326d22ee6" (UID: "ffc46614-5f6d-40ad-a388-1ff326d22ee6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:28:33 crc kubenswrapper[4793]: I0127 20:28:33.401528 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 20:28:33 crc kubenswrapper[4793]: I0127 20:28:33.401582 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 20:28:33 crc kubenswrapper[4793]: I0127 20:28:33.401593 4793 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/ffc46614-5f6d-40ad-a388-1ff326d22ee6-custom-prometheus-ca\") on node \"crc\" DevicePath \"\""
Jan 27 20:28:33 crc kubenswrapper[4793]: I0127 20:28:33.401605 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzw87\" (UniqueName: \"kubernetes.io/projected/ffc46614-5f6d-40ad-a388-1ff326d22ee6-kube-api-access-tzw87\") on node \"crc\" DevicePath \"\""
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.019721 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"ffc46614-5f6d-40ad-a388-1ff326d22ee6","Type":"ContainerDied","Data":"d58b45698ec3b203a68b847f573b5a69ed4db504290a5c4507f43ea577d2b3b9"}
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.019775 4793 scope.go:117] "RemoveContainer" containerID="b7064c87a3f192cde4ead92e54cb033d42bc8dcfceedc9a062d08cf4e08281fd"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.019775 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.059941 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"]
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.080639 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-decision-engine-0"]
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.098448 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"]
Jan 27 20:28:34 crc kubenswrapper[4793]: E0127 20:28:34.099162 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerName="watcher-decision-engine"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.099194 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerName="watcher-decision-engine"
Jan 27 20:28:34 crc kubenswrapper[4793]: E0127 20:28:34.099235 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerName="watcher-decision-engine"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.099249 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerName="watcher-decision-engine"
Jan 27 20:28:34 crc kubenswrapper[4793]: E0127 20:28:34.099292 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerName="watcher-decision-engine"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.099301 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerName="watcher-decision-engine"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.101062 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerName="watcher-decision-engine"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.101104 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerName="watcher-decision-engine"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.101131 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerName="watcher-decision-engine"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.104715 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.108425 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.112328 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"]
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.222801 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0\") " pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.223118 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wbl5\" (UniqueName: \"kubernetes.io/projected/0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0-kube-api-access-2wbl5\") pod \"watcher-decision-engine-0\" (UID: \"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0\") " pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.223344 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0-config-data\") pod \"watcher-decision-engine-0\" (UID: \"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0\") " pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.223466 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0\") " pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.223612 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0-logs\") pod \"watcher-decision-engine-0\" (UID: \"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0\") " pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.325713 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wbl5\" (UniqueName: \"kubernetes.io/projected/0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0-kube-api-access-2wbl5\") pod \"watcher-decision-engine-0\" (UID: \"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0\") " pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.325862 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0-config-data\") pod \"watcher-decision-engine-0\" (UID: \"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0\") " pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.325892 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0\") " pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.325933 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0-logs\") pod \"watcher-decision-engine-0\" (UID: \"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0\") " pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.325992 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0\") " pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.326562 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0-logs\") pod \"watcher-decision-engine-0\" (UID: \"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0\") " pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.330389 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0\") " pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.330453 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0-config-data\") pod \"watcher-decision-engine-0\" (UID: \"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0\") " pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.332240 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0\") " pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.345026 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wbl5\" (UniqueName: \"kubernetes.io/projected/0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0-kube-api-access-2wbl5\") pod \"watcher-decision-engine-0\" (UID: \"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0\") " pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.425889 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0"
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.875462 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"]
Jan 27 20:28:34 crc kubenswrapper[4793]: I0127 20:28:34.936274 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Jan 27 20:28:35 crc kubenswrapper[4793]: I0127 20:28:35.059881 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0","Type":"ContainerStarted","Data":"6c72a88091bd6157445766ca778a8d0e37ba7a77db3a9248d93327749f0f6364"}
Jan 27 20:28:35 crc kubenswrapper[4793]: I0127 20:28:35.815846 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" path="/var/lib/kubelet/pods/ffc46614-5f6d-40ad-a388-1ff326d22ee6/volumes"
Jan 27 20:28:36 crc kubenswrapper[4793]: I0127 20:28:36.112657 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0","Type":"ContainerStarted","Data":"ea67a4045a0cad2982642d8087dc488673be715818c8ddfddfbd6e807422a6a3"}
Jan 27 20:28:36 crc kubenswrapper[4793]: I0127 20:28:36.130939 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=2.130919374 podStartE2EDuration="2.130919374s" podCreationTimestamp="2026-01-27 20:28:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:28:36.126821903 +0000 UTC m=+1541.517075059" watchObservedRunningTime="2026-01-27 20:28:36.130919374 +0000 UTC m=+1541.521172530"
Jan 27 20:28:38 crc kubenswrapper[4793]: I0127 20:28:38.243232 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 20:28:38 crc kubenswrapper[4793]: I0127 20:28:38.243597 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 20:28:38 crc kubenswrapper[4793]: I0127 20:28:38.244382 4793 scope.go:117] "RemoveContainer" containerID="97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e"
Jan 27 20:28:38 crc kubenswrapper[4793]: E0127 20:28:38.244632 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.145891 4793 generic.go:334] "Generic (PLEG): container finished" podID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerID="57ddea27c26fd277acfc2270877b9d7e587de2fc84465cd194cfc9dbae24d76e" exitCode=137
Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.145956 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"39ea7a6f-2069-4513-9d1d-b8f95ae619d3","Type":"ContainerDied","Data":"57ddea27c26fd277acfc2270877b9d7e587de2fc84465cd194cfc9dbae24d76e"}
Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.146221 4793 kubelet.go:2453] "SyncLoop
(PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"39ea7a6f-2069-4513-9d1d-b8f95ae619d3","Type":"ContainerDied","Data":"00d04e5e31e71ed6a0add0a4590c3c487abe0302dfcfb0630b2906a1649142aa"} Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.146237 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00d04e5e31e71ed6a0add0a4590c3c487abe0302dfcfb0630b2906a1649142aa" Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.168781 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.322986 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-run-httpd\") pod \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.323044 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-sg-core-conf-yaml\") pod \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.323090 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-combined-ca-bundle\") pod \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.323178 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-log-httpd\") pod \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.323240 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-config-data\") pod \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.323295 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6k5b2\" (UniqueName: \"kubernetes.io/projected/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-kube-api-access-6k5b2\") pod \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.323754 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "39ea7a6f-2069-4513-9d1d-b8f95ae619d3" (UID: "39ea7a6f-2069-4513-9d1d-b8f95ae619d3"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.323770 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "39ea7a6f-2069-4513-9d1d-b8f95ae619d3" (UID: "39ea7a6f-2069-4513-9d1d-b8f95ae619d3"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.324000 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-scripts\") pod \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\" (UID: \"39ea7a6f-2069-4513-9d1d-b8f95ae619d3\") " Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.324536 4793 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.324570 4793 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.328686 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-scripts" (OuterVolumeSpecName: "scripts") pod "39ea7a6f-2069-4513-9d1d-b8f95ae619d3" (UID: "39ea7a6f-2069-4513-9d1d-b8f95ae619d3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.330128 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-kube-api-access-6k5b2" (OuterVolumeSpecName: "kube-api-access-6k5b2") pod "39ea7a6f-2069-4513-9d1d-b8f95ae619d3" (UID: "39ea7a6f-2069-4513-9d1d-b8f95ae619d3"). InnerVolumeSpecName "kube-api-access-6k5b2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.351303 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "39ea7a6f-2069-4513-9d1d-b8f95ae619d3" (UID: "39ea7a6f-2069-4513-9d1d-b8f95ae619d3"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.411536 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "39ea7a6f-2069-4513-9d1d-b8f95ae619d3" (UID: "39ea7a6f-2069-4513-9d1d-b8f95ae619d3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.426285 4793 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.426318 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.426340 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6k5b2\" (UniqueName: \"kubernetes.io/projected/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-kube-api-access-6k5b2\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.426353 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.430928 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-config-data" (OuterVolumeSpecName: "config-data") pod "39ea7a6f-2069-4513-9d1d-b8f95ae619d3" (UID: "39ea7a6f-2069-4513-9d1d-b8f95ae619d3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:28:39 crc kubenswrapper[4793]: I0127 20:28:39.527925 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39ea7a6f-2069-4513-9d1d-b8f95ae619d3-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.155827 4793 generic.go:334] "Generic (PLEG): container finished" podID="599d34c5-5606-4125-865c-ff142d5fce8d" containerID="c310c8f3cd5f9eccfaa63e08eeaf120a61177eb1d1629a9b4b97725acdc11bcb" exitCode=0 Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.155917 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-kdwlw" event={"ID":"599d34c5-5606-4125-865c-ff142d5fce8d","Type":"ContainerDied","Data":"c310c8f3cd5f9eccfaa63e08eeaf120a61177eb1d1629a9b4b97725acdc11bcb"} Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.155927 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.203471 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.214784 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.234465 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:28:40 crc kubenswrapper[4793]: E0127 20:28:40.234971 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="ceilometer-central-agent" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.234991 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="ceilometer-central-agent" Jan 27 20:28:40 crc kubenswrapper[4793]: E0127 20:28:40.235018 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="sg-core" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.235024 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="sg-core" Jan 27 20:28:40 crc kubenswrapper[4793]: E0127 20:28:40.235035 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="proxy-httpd" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.235042 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="proxy-httpd" Jan 27 20:28:40 crc kubenswrapper[4793]: E0127 20:28:40.235062 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="ceilometer-notification-agent" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.235068 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="ceilometer-notification-agent" Jan 27 20:28:40 crc kubenswrapper[4793]: E0127 20:28:40.235084 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerName="watcher-decision-engine" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.235089 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerName="watcher-decision-engine" Jan 27 20:28:40 crc kubenswrapper[4793]: E0127 20:28:40.235102 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerName="watcher-decision-engine" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.235108 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerName="watcher-decision-engine" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.235282 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="ceilometer-central-agent" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.235301 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="ceilometer-notification-agent" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.235318 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="sg-core" 
Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.235327 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerName="watcher-decision-engine" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.235338 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" containerName="proxy-httpd" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.235348 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffc46614-5f6d-40ad-a388-1ff326d22ee6" containerName="watcher-decision-engine" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.237299 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.239823 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.240094 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.240821 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.240878 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-scripts\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.241016 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrb7z\" (UniqueName: \"kubernetes.io/projected/22ca534e-e36b-4d60-845b-ba1752c5c99c-kube-api-access-xrb7z\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.241114 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-config-data\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.241249 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/22ca534e-e36b-4d60-845b-ba1752c5c99c-log-httpd\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.241305 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/22ca534e-e36b-4d60-845b-ba1752c5c99c-run-httpd\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.241341 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.247040 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.342514 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrb7z\" (UniqueName: \"kubernetes.io/projected/22ca534e-e36b-4d60-845b-ba1752c5c99c-kube-api-access-xrb7z\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.342841 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-config-data\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.342901 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/22ca534e-e36b-4d60-845b-ba1752c5c99c-log-httpd\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.342931 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/22ca534e-e36b-4d60-845b-ba1752c5c99c-run-httpd\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.342954 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.343033 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.343070 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-scripts\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.343693 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/22ca534e-e36b-4d60-845b-ba1752c5c99c-log-httpd\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.343742 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/22ca534e-e36b-4d60-845b-ba1752c5c99c-run-httpd\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.353317 4793 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.355244 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.355588 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-config-data\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.355584 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-scripts\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.362342 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrb7z\" (UniqueName: \"kubernetes.io/projected/22ca534e-e36b-4d60-845b-ba1752c5c99c-kube-api-access-xrb7z\") pod \"ceilometer-0\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " pod="openstack/ceilometer-0" Jan 27 20:28:40 crc kubenswrapper[4793]: I0127 20:28:40.559882 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:28:41 crc kubenswrapper[4793]: I0127 20:28:41.021667 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:28:41 crc kubenswrapper[4793]: W0127 20:28:41.025311 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod22ca534e_e36b_4d60_845b_ba1752c5c99c.slice/crio-edb073055de088e86ff592c8637a226e873eed9a783774eeb3ef5f92ef50e319 WatchSource:0}: Error finding container edb073055de088e86ff592c8637a226e873eed9a783774eeb3ef5f92ef50e319: Status 404 returned error can't find the container with id edb073055de088e86ff592c8637a226e873eed9a783774eeb3ef5f92ef50e319 Jan 27 20:28:41 crc kubenswrapper[4793]: I0127 20:28:41.167228 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"22ca534e-e36b-4d60-845b-ba1752c5c99c","Type":"ContainerStarted","Data":"edb073055de088e86ff592c8637a226e873eed9a783774eeb3ef5f92ef50e319"} Jan 27 20:28:41 crc kubenswrapper[4793]: I0127 20:28:41.504229 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-kdwlw" Jan 27 20:28:41 crc kubenswrapper[4793]: I0127 20:28:41.593538 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-combined-ca-bundle\") pod \"599d34c5-5606-4125-865c-ff142d5fce8d\" (UID: \"599d34c5-5606-4125-865c-ff142d5fce8d\") " Jan 27 20:28:41 crc kubenswrapper[4793]: I0127 20:28:41.593613 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m54nh\" (UniqueName: \"kubernetes.io/projected/599d34c5-5606-4125-865c-ff142d5fce8d-kube-api-access-m54nh\") pod \"599d34c5-5606-4125-865c-ff142d5fce8d\" (UID: \"599d34c5-5606-4125-865c-ff142d5fce8d\") " Jan 27 20:28:41 crc kubenswrapper[4793]: I0127 20:28:41.593732 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-config-data\") pod \"599d34c5-5606-4125-865c-ff142d5fce8d\" (UID: \"599d34c5-5606-4125-865c-ff142d5fce8d\") " Jan 27 20:28:41 crc kubenswrapper[4793]: I0127 20:28:41.593772 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-scripts\") pod \"599d34c5-5606-4125-865c-ff142d5fce8d\" (UID: \"599d34c5-5606-4125-865c-ff142d5fce8d\") " Jan 27 20:28:41 crc kubenswrapper[4793]: I0127 20:28:41.600513 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-scripts" (OuterVolumeSpecName: "scripts") pod "599d34c5-5606-4125-865c-ff142d5fce8d" (UID: "599d34c5-5606-4125-865c-ff142d5fce8d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:28:41 crc kubenswrapper[4793]: I0127 20:28:41.600784 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/599d34c5-5606-4125-865c-ff142d5fce8d-kube-api-access-m54nh" (OuterVolumeSpecName: "kube-api-access-m54nh") pod "599d34c5-5606-4125-865c-ff142d5fce8d" (UID: "599d34c5-5606-4125-865c-ff142d5fce8d"). InnerVolumeSpecName "kube-api-access-m54nh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:28:41 crc kubenswrapper[4793]: I0127 20:28:41.630040 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-config-data" (OuterVolumeSpecName: "config-data") pod "599d34c5-5606-4125-865c-ff142d5fce8d" (UID: "599d34c5-5606-4125-865c-ff142d5fce8d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:28:41 crc kubenswrapper[4793]: I0127 20:28:41.630533 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "599d34c5-5606-4125-865c-ff142d5fce8d" (UID: "599d34c5-5606-4125-865c-ff142d5fce8d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:28:41 crc kubenswrapper[4793]: I0127 20:28:41.696152 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:41 crc kubenswrapper[4793]: I0127 20:28:41.696429 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:41 crc kubenswrapper[4793]: I0127 20:28:41.696512 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/599d34c5-5606-4125-865c-ff142d5fce8d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:41 crc kubenswrapper[4793]: I0127 20:28:41.696638 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m54nh\" (UniqueName: \"kubernetes.io/projected/599d34c5-5606-4125-865c-ff142d5fce8d-kube-api-access-m54nh\") on node \"crc\" DevicePath \"\"" Jan 27 20:28:41 crc kubenswrapper[4793]: I0127 20:28:41.839395 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39ea7a6f-2069-4513-9d1d-b8f95ae619d3" path="/var/lib/kubelet/pods/39ea7a6f-2069-4513-9d1d-b8f95ae619d3/volumes" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.188744 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"22ca534e-e36b-4d60-845b-ba1752c5c99c","Type":"ContainerStarted","Data":"3b16586b8ff87bad0d9b303c454f756e7c4a0a50bb292228259a80955ffe39a3"} Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.188829 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"22ca534e-e36b-4d60-845b-ba1752c5c99c","Type":"ContainerStarted","Data":"9ac5478a5703cf5ff4569c49173a34ef2bd37c7ef314ddfd7363f52505a32a1c"} Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.190999 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-kdwlw" event={"ID":"599d34c5-5606-4125-865c-ff142d5fce8d","Type":"ContainerDied","Data":"f80b810311ced09a4974bd8d502595416cf4cf48a97ee60fdb675bf3d8fbae82"} Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.191061 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f80b810311ced09a4974bd8d502595416cf4cf48a97ee60fdb675bf3d8fbae82" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.191176 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-kdwlw" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.294378 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 27 20:28:42 crc kubenswrapper[4793]: E0127 20:28:42.294894 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="599d34c5-5606-4125-865c-ff142d5fce8d" containerName="nova-cell0-conductor-db-sync" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.294919 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="599d34c5-5606-4125-865c-ff142d5fce8d" containerName="nova-cell0-conductor-db-sync" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.295192 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="599d34c5-5606-4125-865c-ff142d5fce8d" containerName="nova-cell0-conductor-db-sync" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.296085 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.299099 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-kmrnn" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.299528 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.314289 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfsdj\" (UniqueName: \"kubernetes.io/projected/d442904a-5793-4375-a27a-3d80e7214ac4-kube-api-access-sfsdj\") pod \"nova-cell0-conductor-0\" (UID: \"d442904a-5793-4375-a27a-3d80e7214ac4\") " pod="openstack/nova-cell0-conductor-0" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.314836 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d442904a-5793-4375-a27a-3d80e7214ac4-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"d442904a-5793-4375-a27a-3d80e7214ac4\") " pod="openstack/nova-cell0-conductor-0" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.314918 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d442904a-5793-4375-a27a-3d80e7214ac4-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"d442904a-5793-4375-a27a-3d80e7214ac4\") " pod="openstack/nova-cell0-conductor-0" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.325999 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.417118 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d442904a-5793-4375-a27a-3d80e7214ac4-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"d442904a-5793-4375-a27a-3d80e7214ac4\") " pod="openstack/nova-cell0-conductor-0" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.417453 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d442904a-5793-4375-a27a-3d80e7214ac4-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"d442904a-5793-4375-a27a-3d80e7214ac4\") " pod="openstack/nova-cell0-conductor-0" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 
20:28:42.417690 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfsdj\" (UniqueName: \"kubernetes.io/projected/d442904a-5793-4375-a27a-3d80e7214ac4-kube-api-access-sfsdj\") pod \"nova-cell0-conductor-0\" (UID: \"d442904a-5793-4375-a27a-3d80e7214ac4\") " pod="openstack/nova-cell0-conductor-0" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.421261 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d442904a-5793-4375-a27a-3d80e7214ac4-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"d442904a-5793-4375-a27a-3d80e7214ac4\") " pod="openstack/nova-cell0-conductor-0" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.421380 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d442904a-5793-4375-a27a-3d80e7214ac4-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"d442904a-5793-4375-a27a-3d80e7214ac4\") " pod="openstack/nova-cell0-conductor-0" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.435381 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfsdj\" (UniqueName: \"kubernetes.io/projected/d442904a-5793-4375-a27a-3d80e7214ac4-kube-api-access-sfsdj\") pod \"nova-cell0-conductor-0\" (UID: \"d442904a-5793-4375-a27a-3d80e7214ac4\") " pod="openstack/nova-cell0-conductor-0" Jan 27 20:28:42 crc kubenswrapper[4793]: I0127 20:28:42.619264 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 27 20:28:43 crc kubenswrapper[4793]: I0127 20:28:43.076939 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 27 20:28:43 crc kubenswrapper[4793]: W0127 20:28:43.078280 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd442904a_5793_4375_a27a_3d80e7214ac4.slice/crio-8663a6f0f7ddca8099d79b4d683022749efaf1e9295eb410138033b144b4636a WatchSource:0}: Error finding container 8663a6f0f7ddca8099d79b4d683022749efaf1e9295eb410138033b144b4636a: Status 404 returned error can't find the container with id 8663a6f0f7ddca8099d79b4d683022749efaf1e9295eb410138033b144b4636a Jan 27 20:28:43 crc kubenswrapper[4793]: I0127 20:28:43.202007 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"d442904a-5793-4375-a27a-3d80e7214ac4","Type":"ContainerStarted","Data":"8663a6f0f7ddca8099d79b4d683022749efaf1e9295eb410138033b144b4636a"} Jan 27 20:28:43 crc kubenswrapper[4793]: I0127 20:28:43.203570 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"22ca534e-e36b-4d60-845b-ba1752c5c99c","Type":"ContainerStarted","Data":"3cbc7cc19d907d4e8044680fab29dde33669f5fd7028882b07a3bb9a9c3a20b6"} Jan 27 20:28:44 crc kubenswrapper[4793]: I0127 20:28:44.215276 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"22ca534e-e36b-4d60-845b-ba1752c5c99c","Type":"ContainerStarted","Data":"0663d764a1987e35bd6bbf91dac60d52a8ea6643e717ad0870072caf19f7120e"} Jan 27 20:28:44 crc kubenswrapper[4793]: I0127 20:28:44.216078 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 27 20:28:44 crc kubenswrapper[4793]: I0127 20:28:44.218282 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
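pod="openstack/nova-cell0-conductor-0" event={"ID":"d442904a-5793-4375-a27a-3d80e7214ac4","Type":"ContainerStarted","Data":"dced6b934393cc6ad9ed7138a3bb11e8f1bd9c367358afb025868ffaf2cf311a"}

The pair of "Observed pod startup duration" records just below report two figures per pod: podStartE2EDuration, the gap between podCreationTimestamp and watchObservedRunningTime, and podStartSLOduration, which additionally subtracts the time spent pulling images; when firstStartedPulling is the zero time (image already present, as for nova-cell0-conductor-0) the two coincide. Redoing the arithmetic for the ceilometer-0 record, with timestamps copied from the log and the relationship between the fields as an assumption checked against the logged values, not a call into kubelet code:

package main

import (
	"fmt"
	"time"
)

func main() {
	parse := func(s string) time.Time {
		t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2026-01-27 20:28:40 +0000 UTC")
	firstPull := parse("2026-01-27 20:28:41.028578423 +0000 UTC")
	lastPull := parse("2026-01-27 20:28:43.631409253 +0000 UTC")
	running := parse("2026-01-27 20:28:44.252283702 +0000 UTC")

	e2e := running.Sub(created)          // 4.252283702s, the logged podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // 1.649452872s vs logged 1.6494528819999998
	fmt.Println("podStartE2EDuration:", e2e)
	fmt.Println("podStartSLOduration:", slo) // last digits differ: separate clock reads in the tracker
}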
pod="openstack/nova-cell0-conductor-0" event={"ID":"d442904a-5793-4375-a27a-3d80e7214ac4","Type":"ContainerStarted","Data":"dced6b934393cc6ad9ed7138a3bb11e8f1bd9c367358afb025868ffaf2cf311a"} Jan 27 20:28:44 crc kubenswrapper[4793]: I0127 20:28:44.218477 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 27 20:28:44 crc kubenswrapper[4793]: I0127 20:28:44.252308 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.6494528819999998 podStartE2EDuration="4.252283702s" podCreationTimestamp="2026-01-27 20:28:40 +0000 UTC" firstStartedPulling="2026-01-27 20:28:41.028578423 +0000 UTC m=+1546.418831579" lastFinishedPulling="2026-01-27 20:28:43.631409253 +0000 UTC m=+1549.021662399" observedRunningTime="2026-01-27 20:28:44.240367499 +0000 UTC m=+1549.630620685" watchObservedRunningTime="2026-01-27 20:28:44.252283702 +0000 UTC m=+1549.642536858" Jan 27 20:28:44 crc kubenswrapper[4793]: I0127 20:28:44.278123 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.278103216 podStartE2EDuration="2.278103216s" podCreationTimestamp="2026-01-27 20:28:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:28:44.268056349 +0000 UTC m=+1549.658309505" watchObservedRunningTime="2026-01-27 20:28:44.278103216 +0000 UTC m=+1549.668356372" Jan 27 20:28:44 crc kubenswrapper[4793]: I0127 20:28:44.426651 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 27 20:28:44 crc kubenswrapper[4793]: I0127 20:28:44.474391 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Jan 27 20:28:45 crc kubenswrapper[4793]: I0127 20:28:45.229688 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Jan 27 20:28:45 crc kubenswrapper[4793]: I0127 20:28:45.258148 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Jan 27 20:28:48 crc kubenswrapper[4793]: I0127 20:28:48.803875 4793 scope.go:117] "RemoveContainer" containerID="97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e" Jan 27 20:28:48 crc kubenswrapper[4793]: E0127 20:28:48.804442 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:28:52 crc kubenswrapper[4793]: I0127 20:28:52.649392 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.250190 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-hszbk"] Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.251666 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-hszbk" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.262891 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-hszbk"] Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.280933 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.280994 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.314216 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-config-data\") pod \"nova-cell0-cell-mapping-hszbk\" (UID: \"6cf3de60-1232-4828-b1a7-77e1f483bfff\") " pod="openstack/nova-cell0-cell-mapping-hszbk" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.314483 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-hszbk\" (UID: \"6cf3de60-1232-4828-b1a7-77e1f483bfff\") " pod="openstack/nova-cell0-cell-mapping-hszbk" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.314670 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-scripts\") pod \"nova-cell0-cell-mapping-hszbk\" (UID: \"6cf3de60-1232-4828-b1a7-77e1f483bfff\") " pod="openstack/nova-cell0-cell-mapping-hszbk" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.314787 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-664rj\" (UniqueName: \"kubernetes.io/projected/6cf3de60-1232-4828-b1a7-77e1f483bfff-kube-api-access-664rj\") pod \"nova-cell0-cell-mapping-hszbk\" (UID: \"6cf3de60-1232-4828-b1a7-77e1f483bfff\") " pod="openstack/nova-cell0-cell-mapping-hszbk" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.416992 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-scripts\") pod \"nova-cell0-cell-mapping-hszbk\" (UID: \"6cf3de60-1232-4828-b1a7-77e1f483bfff\") " pod="openstack/nova-cell0-cell-mapping-hszbk" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.417081 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-664rj\" (UniqueName: \"kubernetes.io/projected/6cf3de60-1232-4828-b1a7-77e1f483bfff-kube-api-access-664rj\") pod \"nova-cell0-cell-mapping-hszbk\" (UID: \"6cf3de60-1232-4828-b1a7-77e1f483bfff\") " pod="openstack/nova-cell0-cell-mapping-hszbk" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.417121 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-config-data\") pod \"nova-cell0-cell-mapping-hszbk\" (UID: \"6cf3de60-1232-4828-b1a7-77e1f483bfff\") " pod="openstack/nova-cell0-cell-mapping-hszbk" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.417209 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-hszbk\" (UID: \"6cf3de60-1232-4828-b1a7-77e1f483bfff\") " pod="openstack/nova-cell0-cell-mapping-hszbk" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.431175 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-hszbk\" (UID: \"6cf3de60-1232-4828-b1a7-77e1f483bfff\") " pod="openstack/nova-cell0-cell-mapping-hszbk" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.439274 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-664rj\" (UniqueName: \"kubernetes.io/projected/6cf3de60-1232-4828-b1a7-77e1f483bfff-kube-api-access-664rj\") pod \"nova-cell0-cell-mapping-hszbk\" (UID: \"6cf3de60-1232-4828-b1a7-77e1f483bfff\") " pod="openstack/nova-cell0-cell-mapping-hszbk" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.442190 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-scripts\") pod \"nova-cell0-cell-mapping-hszbk\" (UID: \"6cf3de60-1232-4828-b1a7-77e1f483bfff\") " pod="openstack/nova-cell0-cell-mapping-hszbk" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.451237 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-config-data\") pod \"nova-cell0-cell-mapping-hszbk\" (UID: \"6cf3de60-1232-4828-b1a7-77e1f483bfff\") " pod="openstack/nova-cell0-cell-mapping-hszbk" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.544322 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.547395 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.560931 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.566295 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.606215 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.607966 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.615958 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.631480 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-hszbk" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.648075 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f944650-8c63-483f-9b8a-2ced512318e6-config-data\") pod \"nova-api-0\" (UID: \"4f944650-8c63-483f-9b8a-2ced512318e6\") " pod="openstack/nova-api-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.648387 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f944650-8c63-483f-9b8a-2ced512318e6-logs\") pod \"nova-api-0\" (UID: \"4f944650-8c63-483f-9b8a-2ced512318e6\") " pod="openstack/nova-api-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.648447 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbg82\" (UniqueName: \"kubernetes.io/projected/4f944650-8c63-483f-9b8a-2ced512318e6-kube-api-access-kbg82\") pod \"nova-api-0\" (UID: \"4f944650-8c63-483f-9b8a-2ced512318e6\") " pod="openstack/nova-api-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.648496 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f944650-8c63-483f-9b8a-2ced512318e6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4f944650-8c63-483f-9b8a-2ced512318e6\") " pod="openstack/nova-api-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.675214 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.756952 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.758716 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.759537 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f944650-8c63-483f-9b8a-2ced512318e6-config-data\") pod \"nova-api-0\" (UID: \"4f944650-8c63-483f-9b8a-2ced512318e6\") " pod="openstack/nova-api-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.759588 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2w8p\" (UniqueName: \"kubernetes.io/projected/211c2cd3-959c-48d0-aeaf-84a8595017c6-kube-api-access-l2w8p\") pod \"nova-metadata-0\" (UID: \"211c2cd3-959c-48d0-aeaf-84a8595017c6\") " pod="openstack/nova-metadata-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.759631 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/211c2cd3-959c-48d0-aeaf-84a8595017c6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"211c2cd3-959c-48d0-aeaf-84a8595017c6\") " pod="openstack/nova-metadata-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.759657 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f944650-8c63-483f-9b8a-2ced512318e6-logs\") pod \"nova-api-0\" (UID: \"4f944650-8c63-483f-9b8a-2ced512318e6\") " pod="openstack/nova-api-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.759679 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/211c2cd3-959c-48d0-aeaf-84a8595017c6-logs\") pod \"nova-metadata-0\" (UID: \"211c2cd3-959c-48d0-aeaf-84a8595017c6\") " pod="openstack/nova-metadata-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.759760 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbg82\" (UniqueName: \"kubernetes.io/projected/4f944650-8c63-483f-9b8a-2ced512318e6-kube-api-access-kbg82\") pod \"nova-api-0\" (UID: \"4f944650-8c63-483f-9b8a-2ced512318e6\") " pod="openstack/nova-api-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.759872 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f944650-8c63-483f-9b8a-2ced512318e6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4f944650-8c63-483f-9b8a-2ced512318e6\") " pod="openstack/nova-api-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.760065 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f944650-8c63-483f-9b8a-2ced512318e6-logs\") pod \"nova-api-0\" (UID: \"4f944650-8c63-483f-9b8a-2ced512318e6\") " pod="openstack/nova-api-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.760067 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/211c2cd3-959c-48d0-aeaf-84a8595017c6-config-data\") pod \"nova-metadata-0\" (UID: \"211c2cd3-959c-48d0-aeaf-84a8595017c6\") " pod="openstack/nova-metadata-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.777287 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.785678 4793 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f944650-8c63-483f-9b8a-2ced512318e6-config-data\") pod \"nova-api-0\" (UID: \"4f944650-8c63-483f-9b8a-2ced512318e6\") " pod="openstack/nova-api-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.794221 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f944650-8c63-483f-9b8a-2ced512318e6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4f944650-8c63-483f-9b8a-2ced512318e6\") " pod="openstack/nova-api-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.817989 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbg82\" (UniqueName: \"kubernetes.io/projected/4f944650-8c63-483f-9b8a-2ced512318e6-kube-api-access-kbg82\") pod \"nova-api-0\" (UID: \"4f944650-8c63-483f-9b8a-2ced512318e6\") " pod="openstack/nova-api-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.848233 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.848280 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-d6b9584f5-6zm4g"] Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.850249 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d6b9584f5-6zm4g"] Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.850278 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.854033 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.861510 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vkgd\" (UniqueName: \"kubernetes.io/projected/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-kube-api-access-2vkgd\") pod \"nova-scheduler-0\" (UID: \"f6a4bc6a-42d5-4be9-b35f-d042d71c146f\") " pod="openstack/nova-scheduler-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.861644 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/211c2cd3-959c-48d0-aeaf-84a8595017c6-config-data\") pod \"nova-metadata-0\" (UID: \"211c2cd3-959c-48d0-aeaf-84a8595017c6\") " pod="openstack/nova-metadata-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.861714 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f6a4bc6a-42d5-4be9-b35f-d042d71c146f\") " pod="openstack/nova-scheduler-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.861736 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2w8p\" (UniqueName: \"kubernetes.io/projected/211c2cd3-959c-48d0-aeaf-84a8595017c6-kube-api-access-l2w8p\") pod \"nova-metadata-0\" (UID: \"211c2cd3-959c-48d0-aeaf-84a8595017c6\") " pod="openstack/nova-metadata-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.861770 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-config-data\") pod \"nova-scheduler-0\" (UID: \"f6a4bc6a-42d5-4be9-b35f-d042d71c146f\") " pod="openstack/nova-scheduler-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.861792 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/211c2cd3-959c-48d0-aeaf-84a8595017c6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"211c2cd3-959c-48d0-aeaf-84a8595017c6\") " pod="openstack/nova-metadata-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.861815 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.861827 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/211c2cd3-959c-48d0-aeaf-84a8595017c6-logs\") pod \"nova-metadata-0\" (UID: \"211c2cd3-959c-48d0-aeaf-84a8595017c6\") " pod="openstack/nova-metadata-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.862156 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/211c2cd3-959c-48d0-aeaf-84a8595017c6-logs\") pod \"nova-metadata-0\" (UID: \"211c2cd3-959c-48d0-aeaf-84a8595017c6\") " pod="openstack/nova-metadata-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.862266 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.864289 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.882395 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/211c2cd3-959c-48d0-aeaf-84a8595017c6-config-data\") pod \"nova-metadata-0\" (UID: \"211c2cd3-959c-48d0-aeaf-84a8595017c6\") " pod="openstack/nova-metadata-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.893037 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/211c2cd3-959c-48d0-aeaf-84a8595017c6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"211c2cd3-959c-48d0-aeaf-84a8595017c6\") " pod="openstack/nova-metadata-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.898345 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2w8p\" (UniqueName: \"kubernetes.io/projected/211c2cd3-959c-48d0-aeaf-84a8595017c6-kube-api-access-l2w8p\") pod \"nova-metadata-0\" (UID: \"211c2cd3-959c-48d0-aeaf-84a8595017c6\") " pod="openstack/nova-metadata-0" Jan 27 20:28:53 crc kubenswrapper[4793]: I0127 20:28:53.898877 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:53.953570 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:53.968297 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-config-data\") pod \"nova-scheduler-0\" (UID: \"f6a4bc6a-42d5-4be9-b35f-d042d71c146f\") " pod="openstack/nova-scheduler-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:53.968395 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-dns-swift-storage-0\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:53.968442 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a24c236-232f-4bd6-8d89-019058789317-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a24c236-232f-4bd6-8d89-019058789317\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:53.968573 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glgwp\" (UniqueName: \"kubernetes.io/projected/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-kube-api-access-glgwp\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:53.968601 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-ovsdbserver-sb\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:53.968654 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-config\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:53.968690 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-dns-svc\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:53.968730 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vkgd\" (UniqueName: \"kubernetes.io/projected/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-kube-api-access-2vkgd\") pod \"nova-scheduler-0\" (UID: \"f6a4bc6a-42d5-4be9-b35f-d042d71c146f\") " pod="openstack/nova-scheduler-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:53.968806 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a24c236-232f-4bd6-8d89-019058789317-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: 
\"5a24c236-232f-4bd6-8d89-019058789317\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:53.968889 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f6a4bc6a-42d5-4be9-b35f-d042d71c146f\") " pod="openstack/nova-scheduler-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:53.968913 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-ovsdbserver-nb\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:53.968960 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6b9j\" (UniqueName: \"kubernetes.io/projected/5a24c236-232f-4bd6-8d89-019058789317-kube-api-access-d6b9j\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a24c236-232f-4bd6-8d89-019058789317\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.011322 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-config-data\") pod \"nova-scheduler-0\" (UID: \"f6a4bc6a-42d5-4be9-b35f-d042d71c146f\") " pod="openstack/nova-scheduler-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.011664 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vkgd\" (UniqueName: \"kubernetes.io/projected/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-kube-api-access-2vkgd\") pod \"nova-scheduler-0\" (UID: \"f6a4bc6a-42d5-4be9-b35f-d042d71c146f\") " pod="openstack/nova-scheduler-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.013885 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f6a4bc6a-42d5-4be9-b35f-d042d71c146f\") " pod="openstack/nova-scheduler-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.071018 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a24c236-232f-4bd6-8d89-019058789317-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a24c236-232f-4bd6-8d89-019058789317\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.071163 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glgwp\" (UniqueName: \"kubernetes.io/projected/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-kube-api-access-glgwp\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.071207 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-ovsdbserver-sb\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc 
kubenswrapper[4793]: I0127 20:28:54.071262 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-config\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.071319 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-dns-svc\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.071415 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a24c236-232f-4bd6-8d89-019058789317-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a24c236-232f-4bd6-8d89-019058789317\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.071485 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-ovsdbserver-nb\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.071517 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6b9j\" (UniqueName: \"kubernetes.io/projected/5a24c236-232f-4bd6-8d89-019058789317-kube-api-access-d6b9j\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a24c236-232f-4bd6-8d89-019058789317\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.071615 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-dns-swift-storage-0\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.073135 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-ovsdbserver-nb\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.073209 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-ovsdbserver-sb\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.073391 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-config\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.073997 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-dns-svc\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.075217 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-dns-swift-storage-0\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.080829 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a24c236-232f-4bd6-8d89-019058789317-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a24c236-232f-4bd6-8d89-019058789317\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.087919 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a24c236-232f-4bd6-8d89-019058789317-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a24c236-232f-4bd6-8d89-019058789317\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.091501 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6b9j\" (UniqueName: \"kubernetes.io/projected/5a24c236-232f-4bd6-8d89-019058789317-kube-api-access-d6b9j\") pod \"nova-cell1-novncproxy-0\" (UID: \"5a24c236-232f-4bd6-8d89-019058789317\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.105968 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glgwp\" (UniqueName: \"kubernetes.io/projected/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-kube-api-access-glgwp\") pod \"dnsmasq-dns-d6b9584f5-6zm4g\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.205287 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.318867 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.327000 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.384941 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-hszbk"] Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.498433 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-8fwdb"] Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.500306 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-8fwdb" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.502866 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.503089 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.531928 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-8fwdb"] Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.626640 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csjfr\" (UniqueName: \"kubernetes.io/projected/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-kube-api-access-csjfr\") pod \"nova-cell1-conductor-db-sync-8fwdb\" (UID: \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\") " pod="openstack/nova-cell1-conductor-db-sync-8fwdb" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.626732 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-scripts\") pod \"nova-cell1-conductor-db-sync-8fwdb\" (UID: \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\") " pod="openstack/nova-cell1-conductor-db-sync-8fwdb" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.642507 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-config-data\") pod \"nova-cell1-conductor-db-sync-8fwdb\" (UID: \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\") " pod="openstack/nova-cell1-conductor-db-sync-8fwdb" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.642609 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-8fwdb\" (UID: \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\") " pod="openstack/nova-cell1-conductor-db-sync-8fwdb" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.648364 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.749493 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-scripts\") pod \"nova-cell1-conductor-db-sync-8fwdb\" (UID: \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\") " pod="openstack/nova-cell1-conductor-db-sync-8fwdb" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.749992 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-config-data\") pod \"nova-cell1-conductor-db-sync-8fwdb\" (UID: \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\") " pod="openstack/nova-cell1-conductor-db-sync-8fwdb" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.750087 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-8fwdb\" (UID: \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\") " 
pod="openstack/nova-cell1-conductor-db-sync-8fwdb" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.750317 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csjfr\" (UniqueName: \"kubernetes.io/projected/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-kube-api-access-csjfr\") pod \"nova-cell1-conductor-db-sync-8fwdb\" (UID: \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\") " pod="openstack/nova-cell1-conductor-db-sync-8fwdb" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.753455 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-scripts\") pod \"nova-cell1-conductor-db-sync-8fwdb\" (UID: \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\") " pod="openstack/nova-cell1-conductor-db-sync-8fwdb" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.754433 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-8fwdb\" (UID: \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\") " pod="openstack/nova-cell1-conductor-db-sync-8fwdb" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.785136 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.793448 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csjfr\" (UniqueName: \"kubernetes.io/projected/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-kube-api-access-csjfr\") pod \"nova-cell1-conductor-db-sync-8fwdb\" (UID: \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\") " pod="openstack/nova-cell1-conductor-db-sync-8fwdb" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.797120 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-config-data\") pod \"nova-cell1-conductor-db-sync-8fwdb\" (UID: \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\") " pod="openstack/nova-cell1-conductor-db-sync-8fwdb" Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.940999 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 20:28:54 crc kubenswrapper[4793]: W0127 20:28:54.949346 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf6a4bc6a_42d5_4be9_b35f_d042d71c146f.slice/crio-fe5a8c70213639dbde8fec1639b78ca1801e96756a90733c5873b5b53d9f899a WatchSource:0}: Error finding container fe5a8c70213639dbde8fec1639b78ca1801e96756a90733c5873b5b53d9f899a: Status 404 returned error can't find the container with id fe5a8c70213639dbde8fec1639b78ca1801e96756a90733c5873b5b53d9f899a Jan 27 20:28:54 crc kubenswrapper[4793]: I0127 20:28:54.952265 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-8fwdb" Jan 27 20:28:55 crc kubenswrapper[4793]: I0127 20:28:55.223517 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 20:28:55 crc kubenswrapper[4793]: I0127 20:28:55.266059 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d6b9584f5-6zm4g"] Jan 27 20:28:55 crc kubenswrapper[4793]: I0127 20:28:55.386611 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5a24c236-232f-4bd6-8d89-019058789317","Type":"ContainerStarted","Data":"3512ee444b26108b529144ee1db20fde52eb0126c61a6863ca941726d91ce026"} Jan 27 20:28:55 crc kubenswrapper[4793]: I0127 20:28:55.388527 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"211c2cd3-959c-48d0-aeaf-84a8595017c6","Type":"ContainerStarted","Data":"e838079bf79b98d9797a7487e4b759dff5cdd910fd2b0f4482c3ef77822a0b3d"} Jan 27 20:28:55 crc kubenswrapper[4793]: I0127 20:28:55.394070 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-hszbk" event={"ID":"6cf3de60-1232-4828-b1a7-77e1f483bfff","Type":"ContainerStarted","Data":"3912a4d44e5f4f2e1539bb1f0a7a5ecd887fe2a897d7a2c82a811d6f959032c3"} Jan 27 20:28:55 crc kubenswrapper[4793]: I0127 20:28:55.394114 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-hszbk" event={"ID":"6cf3de60-1232-4828-b1a7-77e1f483bfff","Type":"ContainerStarted","Data":"1c09d50621dff25a210ff9a80899567f40eca03cbabab48048d1f9244c350ae9"} Jan 27 20:28:55 crc kubenswrapper[4793]: I0127 20:28:55.398968 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" event={"ID":"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac","Type":"ContainerStarted","Data":"11a240f05aba6fba58cbe79cdba708e539af0a8059d2af9a55f4448fff1a994b"} Jan 27 20:28:55 crc kubenswrapper[4793]: I0127 20:28:55.408511 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4f944650-8c63-483f-9b8a-2ced512318e6","Type":"ContainerStarted","Data":"129fc44a5d94995a16b14fc61c70cb91e584b697a40037d5d9e91b3fde493701"} Jan 27 20:28:55 crc kubenswrapper[4793]: I0127 20:28:55.415914 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f6a4bc6a-42d5-4be9-b35f-d042d71c146f","Type":"ContainerStarted","Data":"fe5a8c70213639dbde8fec1639b78ca1801e96756a90733c5873b5b53d9f899a"} Jan 27 20:28:55 crc kubenswrapper[4793]: I0127 20:28:55.421669 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-hszbk" podStartSLOduration=2.421648072 podStartE2EDuration="2.421648072s" podCreationTimestamp="2026-01-27 20:28:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:28:55.410399563 +0000 UTC m=+1560.800652719" watchObservedRunningTime="2026-01-27 20:28:55.421648072 +0000 UTC m=+1560.811901228" Jan 27 20:28:55 crc kubenswrapper[4793]: I0127 20:28:55.595121 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-8fwdb"] Jan 27 20:28:55 crc kubenswrapper[4793]: W0127 20:28:55.603564 4793 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5ba52741_6cd6_4a04_9aa3_25a39cd2e6f5.slice/crio-9961ad76cbb645091a9b7d88438013d1f9f34a2e5d53f313f700660d4f011c0d WatchSource:0}: Error finding container 9961ad76cbb645091a9b7d88438013d1f9f34a2e5d53f313f700660d4f011c0d: Status 404 returned error can't find the container with id 9961ad76cbb645091a9b7d88438013d1f9f34a2e5d53f313f700660d4f011c0d Jan 27 20:28:56 crc kubenswrapper[4793]: I0127 20:28:56.438115 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-8fwdb" event={"ID":"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5","Type":"ContainerStarted","Data":"03a624663d3f8252c079e82c5c545675355c90afff8f16a92e4b98ca0e4ac650"} Jan 27 20:28:56 crc kubenswrapper[4793]: I0127 20:28:56.438652 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-8fwdb" event={"ID":"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5","Type":"ContainerStarted","Data":"9961ad76cbb645091a9b7d88438013d1f9f34a2e5d53f313f700660d4f011c0d"} Jan 27 20:28:56 crc kubenswrapper[4793]: I0127 20:28:56.440358 4793 generic.go:334] "Generic (PLEG): container finished" podID="baa7435e-f1cc-4c71-aaed-712fa2d3e4ac" containerID="6870cc92f73decd303d4bf2a0f0b1c78ca50b0f9abdd6b0a6b39b661d987fb3e" exitCode=0 Jan 27 20:28:56 crc kubenswrapper[4793]: I0127 20:28:56.440470 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" event={"ID":"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac","Type":"ContainerDied","Data":"6870cc92f73decd303d4bf2a0f0b1c78ca50b0f9abdd6b0a6b39b661d987fb3e"} Jan 27 20:28:56 crc kubenswrapper[4793]: I0127 20:28:56.498494 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-8fwdb" podStartSLOduration=2.498468532 podStartE2EDuration="2.498468532s" podCreationTimestamp="2026-01-27 20:28:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:28:56.453251131 +0000 UTC m=+1561.843504297" watchObservedRunningTime="2026-01-27 20:28:56.498468532 +0000 UTC m=+1561.888721688" Jan 27 20:28:57 crc kubenswrapper[4793]: I0127 20:28:57.589523 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:28:57 crc kubenswrapper[4793]: I0127 20:28:57.604051 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 20:28:59 crc kubenswrapper[4793]: I0127 20:28:59.804416 4793 scope.go:117] "RemoveContainer" containerID="97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e" Jan 27 20:28:59 crc kubenswrapper[4793]: E0127 20:28:59.805256 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:29:00 crc kubenswrapper[4793]: I0127 20:29:00.485568 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"211c2cd3-959c-48d0-aeaf-84a8595017c6","Type":"ContainerStarted","Data":"88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73"} Jan 27 20:29:00 crc kubenswrapper[4793]: I0127 20:29:00.485897 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"211c2cd3-959c-48d0-aeaf-84a8595017c6","Type":"ContainerStarted","Data":"a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb"} Jan 27 20:29:00 crc kubenswrapper[4793]: I0127 20:29:00.486028 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="211c2cd3-959c-48d0-aeaf-84a8595017c6" containerName="nova-metadata-log" containerID="cri-o://a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb" gracePeriod=30 Jan 27 20:29:00 crc kubenswrapper[4793]: I0127 20:29:00.486477 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="211c2cd3-959c-48d0-aeaf-84a8595017c6" containerName="nova-metadata-metadata" containerID="cri-o://88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73" gracePeriod=30 Jan 27 20:29:00 crc kubenswrapper[4793]: I0127 20:29:00.502241 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" event={"ID":"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac","Type":"ContainerStarted","Data":"076738602b49bc88fc9f86f069a04488a88a980bbbaf538c27c2cddee88d4eeb"} Jan 27 20:29:00 crc kubenswrapper[4793]: I0127 20:29:00.502405 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:29:00 crc kubenswrapper[4793]: I0127 20:29:00.504856 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4f944650-8c63-483f-9b8a-2ced512318e6","Type":"ContainerStarted","Data":"88a1cf4917b676d7beb77522ba61e7ad0b4d052038aa5eb3179bc10305d6e023"} Jan 27 20:29:00 crc kubenswrapper[4793]: I0127 20:29:00.504890 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4f944650-8c63-483f-9b8a-2ced512318e6","Type":"ContainerStarted","Data":"42d47182f74e0c300737bb85f3cf958bd659f616b08c6b449561c5c419cce0c0"} Jan 27 20:29:00 crc kubenswrapper[4793]: I0127 20:29:00.508498 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f6a4bc6a-42d5-4be9-b35f-d042d71c146f","Type":"ContainerStarted","Data":"6940b90ac3c7b47d73edeb00022206242e6239dab26ce9b30f9557e9672ffa13"} Jan 27 20:29:00 crc kubenswrapper[4793]: I0127 20:29:00.511024 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5a24c236-232f-4bd6-8d89-019058789317","Type":"ContainerStarted","Data":"756ef12bab39ddd06e8c9afefa8cc304473802c6b77c88cffc79bbf743d899fa"} Jan 27 20:29:00 crc kubenswrapper[4793]: I0127 20:29:00.511126 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="5a24c236-232f-4bd6-8d89-019058789317" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://756ef12bab39ddd06e8c9afefa8cc304473802c6b77c88cffc79bbf743d899fa" gracePeriod=30 Jan 27 20:29:00 crc kubenswrapper[4793]: I0127 20:29:00.526727 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.6962491 podStartE2EDuration="7.526700548s" podCreationTimestamp="2026-01-27 20:28:53 +0000 UTC" firstStartedPulling="2026-01-27 20:28:54.670765254 +0000 UTC m=+1560.061018410" lastFinishedPulling="2026-01-27 20:28:59.501216692 +0000 UTC m=+1564.891469858" observedRunningTime="2026-01-27 20:29:00.505752431 +0000 UTC m=+1565.896005597" watchObservedRunningTime="2026-01-27 20:29:00.526700548 +0000 UTC m=+1565.916953704" Jan 27 
20:29:00 crc kubenswrapper[4793]: I0127 20:29:00.539437 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" podStartSLOduration=7.539420837 podStartE2EDuration="7.539420837s" podCreationTimestamp="2026-01-27 20:28:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:29:00.53506666 +0000 UTC m=+1565.925319826" watchObservedRunningTime="2026-01-27 20:29:00.539420837 +0000 UTC m=+1565.929673993" Jan 27 20:29:00 crc kubenswrapper[4793]: I0127 20:29:00.563060 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.870272915 podStartE2EDuration="7.563041184s" podCreationTimestamp="2026-01-27 20:28:53 +0000 UTC" firstStartedPulling="2026-01-27 20:28:54.808446293 +0000 UTC m=+1560.198699449" lastFinishedPulling="2026-01-27 20:28:59.501214562 +0000 UTC m=+1564.891467718" observedRunningTime="2026-01-27 20:29:00.556370388 +0000 UTC m=+1565.946623544" watchObservedRunningTime="2026-01-27 20:29:00.563041184 +0000 UTC m=+1565.953294330" Jan 27 20:29:00 crc kubenswrapper[4793]: I0127 20:29:00.578644 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.043835209 podStartE2EDuration="7.578625389s" podCreationTimestamp="2026-01-27 20:28:53 +0000 UTC" firstStartedPulling="2026-01-27 20:28:54.967495631 +0000 UTC m=+1560.357748787" lastFinishedPulling="2026-01-27 20:28:59.502285811 +0000 UTC m=+1564.892538967" observedRunningTime="2026-01-27 20:29:00.577102518 +0000 UTC m=+1565.967355674" watchObservedRunningTime="2026-01-27 20:29:00.578625389 +0000 UTC m=+1565.968878545" Jan 27 20:29:00 crc kubenswrapper[4793]: I0127 20:29:00.605310 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.360586939 podStartE2EDuration="7.605268097s" podCreationTimestamp="2026-01-27 20:28:53 +0000 UTC" firstStartedPulling="2026-01-27 20:28:55.256692148 +0000 UTC m=+1560.646945304" lastFinishedPulling="2026-01-27 20:28:59.501373306 +0000 UTC m=+1564.891626462" observedRunningTime="2026-01-27 20:29:00.595528848 +0000 UTC m=+1565.985782004" watchObservedRunningTime="2026-01-27 20:29:00.605268097 +0000 UTC m=+1565.995521253" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.275598 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.297874 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2w8p\" (UniqueName: \"kubernetes.io/projected/211c2cd3-959c-48d0-aeaf-84a8595017c6-kube-api-access-l2w8p\") pod \"211c2cd3-959c-48d0-aeaf-84a8595017c6\" (UID: \"211c2cd3-959c-48d0-aeaf-84a8595017c6\") " Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.304502 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/211c2cd3-959c-48d0-aeaf-84a8595017c6-kube-api-access-l2w8p" (OuterVolumeSpecName: "kube-api-access-l2w8p") pod "211c2cd3-959c-48d0-aeaf-84a8595017c6" (UID: "211c2cd3-959c-48d0-aeaf-84a8595017c6"). InnerVolumeSpecName "kube-api-access-l2w8p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.399760 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/211c2cd3-959c-48d0-aeaf-84a8595017c6-config-data\") pod \"211c2cd3-959c-48d0-aeaf-84a8595017c6\" (UID: \"211c2cd3-959c-48d0-aeaf-84a8595017c6\") " Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.399886 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/211c2cd3-959c-48d0-aeaf-84a8595017c6-combined-ca-bundle\") pod \"211c2cd3-959c-48d0-aeaf-84a8595017c6\" (UID: \"211c2cd3-959c-48d0-aeaf-84a8595017c6\") " Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.399992 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/211c2cd3-959c-48d0-aeaf-84a8595017c6-logs\") pod \"211c2cd3-959c-48d0-aeaf-84a8595017c6\" (UID: \"211c2cd3-959c-48d0-aeaf-84a8595017c6\") " Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.400323 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2w8p\" (UniqueName: \"kubernetes.io/projected/211c2cd3-959c-48d0-aeaf-84a8595017c6-kube-api-access-l2w8p\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.400636 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/211c2cd3-959c-48d0-aeaf-84a8595017c6-logs" (OuterVolumeSpecName: "logs") pod "211c2cd3-959c-48d0-aeaf-84a8595017c6" (UID: "211c2cd3-959c-48d0-aeaf-84a8595017c6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.430537 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/211c2cd3-959c-48d0-aeaf-84a8595017c6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "211c2cd3-959c-48d0-aeaf-84a8595017c6" (UID: "211c2cd3-959c-48d0-aeaf-84a8595017c6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.432054 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/211c2cd3-959c-48d0-aeaf-84a8595017c6-config-data" (OuterVolumeSpecName: "config-data") pod "211c2cd3-959c-48d0-aeaf-84a8595017c6" (UID: "211c2cd3-959c-48d0-aeaf-84a8595017c6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.501571 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/211c2cd3-959c-48d0-aeaf-84a8595017c6-logs\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.501604 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/211c2cd3-959c-48d0-aeaf-84a8595017c6-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.501613 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/211c2cd3-959c-48d0-aeaf-84a8595017c6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.525889 4793 generic.go:334] "Generic (PLEG): container finished" podID="211c2cd3-959c-48d0-aeaf-84a8595017c6" containerID="88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73" exitCode=0 Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.526152 4793 generic.go:334] "Generic (PLEG): container finished" podID="211c2cd3-959c-48d0-aeaf-84a8595017c6" containerID="a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb" exitCode=143 Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.526005 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.525955 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"211c2cd3-959c-48d0-aeaf-84a8595017c6","Type":"ContainerDied","Data":"88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73"} Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.526747 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"211c2cd3-959c-48d0-aeaf-84a8595017c6","Type":"ContainerDied","Data":"a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb"} Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.526774 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"211c2cd3-959c-48d0-aeaf-84a8595017c6","Type":"ContainerDied","Data":"e838079bf79b98d9797a7487e4b759dff5cdd910fd2b0f4482c3ef77822a0b3d"} Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.526792 4793 scope.go:117] "RemoveContainer" containerID="88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.589177 4793 scope.go:117] "RemoveContainer" containerID="a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.611825 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.622793 4793 scope.go:117] "RemoveContainer" containerID="88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73" Jan 27 20:29:01 crc kubenswrapper[4793]: E0127 20:29:01.624153 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73\": container with ID starting with 88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73 not found: ID does not exist" 
containerID="88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.624215 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73"} err="failed to get container status \"88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73\": rpc error: code = NotFound desc = could not find container \"88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73\": container with ID starting with 88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73 not found: ID does not exist" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.624242 4793 scope.go:117] "RemoveContainer" containerID="a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb" Jan 27 20:29:01 crc kubenswrapper[4793]: E0127 20:29:01.624587 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb\": container with ID starting with a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb not found: ID does not exist" containerID="a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.624611 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb"} err="failed to get container status \"a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb\": rpc error: code = NotFound desc = could not find container \"a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb\": container with ID starting with a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb not found: ID does not exist" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.624625 4793 scope.go:117] "RemoveContainer" containerID="88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.631005 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.631286 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73"} err="failed to get container status \"88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73\": rpc error: code = NotFound desc = could not find container \"88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73\": container with ID starting with 88b766bc4281cf62e7a3a46f00099aa980b0e42ee1d94418b74cff0edd667d73 not found: ID does not exist" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.631440 4793 scope.go:117] "RemoveContainer" containerID="a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.631885 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb"} err="failed to get container status \"a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb\": rpc error: code = NotFound desc = could not find container \"a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb\": container with ID starting with 
a676bd05ee4913fb22d577b38cf328b773dcb10b39055053210dc623533f93eb not found: ID does not exist" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.636724 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:29:01 crc kubenswrapper[4793]: E0127 20:29:01.637375 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="211c2cd3-959c-48d0-aeaf-84a8595017c6" containerName="nova-metadata-metadata" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.637423 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="211c2cd3-959c-48d0-aeaf-84a8595017c6" containerName="nova-metadata-metadata" Jan 27 20:29:01 crc kubenswrapper[4793]: E0127 20:29:01.637445 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="211c2cd3-959c-48d0-aeaf-84a8595017c6" containerName="nova-metadata-log" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.637453 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="211c2cd3-959c-48d0-aeaf-84a8595017c6" containerName="nova-metadata-log" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.637817 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="211c2cd3-959c-48d0-aeaf-84a8595017c6" containerName="nova-metadata-metadata" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.637910 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="211c2cd3-959c-48d0-aeaf-84a8595017c6" containerName="nova-metadata-log" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.639677 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.642328 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.642331 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.646468 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.707925 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvzxc\" (UniqueName: \"kubernetes.io/projected/4161b850-cbb0-4116-abbc-5889d260a955-kube-api-access-mvzxc\") pod \"nova-metadata-0\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.708082 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.708290 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4161b850-cbb0-4116-abbc-5889d260a955-logs\") pod \"nova-metadata-0\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.708445 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.708510 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-config-data\") pod \"nova-metadata-0\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.809828 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.809893 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-config-data\") pod \"nova-metadata-0\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.810018 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvzxc\" (UniqueName: \"kubernetes.io/projected/4161b850-cbb0-4116-abbc-5889d260a955-kube-api-access-mvzxc\") pod \"nova-metadata-0\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.810124 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.810337 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4161b850-cbb0-4116-abbc-5889d260a955-logs\") pod \"nova-metadata-0\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.810890 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4161b850-cbb0-4116-abbc-5889d260a955-logs\") pod \"nova-metadata-0\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.814834 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-config-data\") pod \"nova-metadata-0\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.814981 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.815302 4793 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.830269 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="211c2cd3-959c-48d0-aeaf-84a8595017c6" path="/var/lib/kubelet/pods/211c2cd3-959c-48d0-aeaf-84a8595017c6/volumes" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.842703 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvzxc\" (UniqueName: \"kubernetes.io/projected/4161b850-cbb0-4116-abbc-5889d260a955-kube-api-access-mvzxc\") pod \"nova-metadata-0\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " pod="openstack/nova-metadata-0" Jan 27 20:29:01 crc kubenswrapper[4793]: I0127 20:29:01.971110 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 20:29:02 crc kubenswrapper[4793]: W0127 20:29:02.495002 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4161b850_cbb0_4116_abbc_5889d260a955.slice/crio-73c5aa884bbdaaa20c148cf685c54198d2c4b83eb4dda69ec09e06d8d748c77d WatchSource:0}: Error finding container 73c5aa884bbdaaa20c148cf685c54198d2c4b83eb4dda69ec09e06d8d748c77d: Status 404 returned error can't find the container with id 73c5aa884bbdaaa20c148cf685c54198d2c4b83eb4dda69ec09e06d8d748c77d Jan 27 20:29:02 crc kubenswrapper[4793]: I0127 20:29:02.495702 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:29:02 crc kubenswrapper[4793]: I0127 20:29:02.541891 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4161b850-cbb0-4116-abbc-5889d260a955","Type":"ContainerStarted","Data":"73c5aa884bbdaaa20c148cf685c54198d2c4b83eb4dda69ec09e06d8d748c77d"} Jan 27 20:29:03 crc kubenswrapper[4793]: I0127 20:29:03.556086 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4161b850-cbb0-4116-abbc-5889d260a955","Type":"ContainerStarted","Data":"a01cba44401102c69b05a1cb804d2d35fd0ebd1eca80440e7dbc3cddb0598b1a"} Jan 27 20:29:03 crc kubenswrapper[4793]: I0127 20:29:03.556643 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4161b850-cbb0-4116-abbc-5889d260a955","Type":"ContainerStarted","Data":"64daf0dde0e1a7e72f79b427a9b45cefb3ec7d949135e1e3676e214b00d13589"} Jan 27 20:29:03 crc kubenswrapper[4793]: I0127 20:29:03.588762 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.588706883 podStartE2EDuration="2.588706883s" podCreationTimestamp="2026-01-27 20:29:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:29:03.572252246 +0000 UTC m=+1568.962505402" watchObservedRunningTime="2026-01-27 20:29:03.588706883 +0000 UTC m=+1568.978960039" Jan 27 20:29:03 crc kubenswrapper[4793]: I0127 20:29:03.899409 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 27 20:29:03 crc kubenswrapper[4793]: I0127 20:29:03.899467 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 27 20:29:04 crc 
kubenswrapper[4793]: I0127 20:29:04.206194 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 27 20:29:04 crc kubenswrapper[4793]: I0127 20:29:04.206666 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 27 20:29:04 crc kubenswrapper[4793]: I0127 20:29:04.243998 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 27 20:29:04 crc kubenswrapper[4793]: I0127 20:29:04.321682 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:29:04 crc kubenswrapper[4793]: I0127 20:29:04.328463 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:04 crc kubenswrapper[4793]: I0127 20:29:04.521240 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57459cb9f9-4xdq5"] Jan 27 20:29:04 crc kubenswrapper[4793]: I0127 20:29:04.522720 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" podUID="7e6e1e12-0db9-4959-a685-bed3c5382fa8" containerName="dnsmasq-dns" containerID="cri-o://178931f45dcf21bed8aecb4d90a62095e8c51d7087de094e451fcfcdb1aa718f" gracePeriod=10 Jan 27 20:29:04 crc kubenswrapper[4793]: I0127 20:29:04.610679 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 27 20:29:04 crc kubenswrapper[4793]: I0127 20:29:04.662371 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" podUID="7e6e1e12-0db9-4959-a685-bed3c5382fa8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.180:5353: connect: connection refused" Jan 27 20:29:04 crc kubenswrapper[4793]: I0127 20:29:04.981848 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4f944650-8c63-483f-9b8a-2ced512318e6" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.203:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:29:04 crc kubenswrapper[4793]: I0127 20:29:04.981801 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4f944650-8c63-483f-9b8a-2ced512318e6" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.203:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.167852 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.342708 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-dns-svc\") pod \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.342801 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-ovsdbserver-sb\") pod \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.342871 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-ovsdbserver-nb\") pod \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.342903 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-config\") pod \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.343163 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztch8\" (UniqueName: \"kubernetes.io/projected/7e6e1e12-0db9-4959-a685-bed3c5382fa8-kube-api-access-ztch8\") pod \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.343220 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-dns-swift-storage-0\") pod \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.367480 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e6e1e12-0db9-4959-a685-bed3c5382fa8-kube-api-access-ztch8" (OuterVolumeSpecName: "kube-api-access-ztch8") pod "7e6e1e12-0db9-4959-a685-bed3c5382fa8" (UID: "7e6e1e12-0db9-4959-a685-bed3c5382fa8"). InnerVolumeSpecName "kube-api-access-ztch8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.407170 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7e6e1e12-0db9-4959-a685-bed3c5382fa8" (UID: "7e6e1e12-0db9-4959-a685-bed3c5382fa8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.415056 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7e6e1e12-0db9-4959-a685-bed3c5382fa8" (UID: "7e6e1e12-0db9-4959-a685-bed3c5382fa8"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.424338 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7e6e1e12-0db9-4959-a685-bed3c5382fa8" (UID: "7e6e1e12-0db9-4959-a685-bed3c5382fa8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:29:05 crc kubenswrapper[4793]: E0127 20:29:05.437318 4793 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-config podName:7e6e1e12-0db9-4959-a685-bed3c5382fa8 nodeName:}" failed. No retries permitted until 2026-01-27 20:29:05.937248076 +0000 UTC m=+1571.327501242 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "config" (UniqueName: "kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-config") pod "7e6e1e12-0db9-4959-a685-bed3c5382fa8" (UID: "7e6e1e12-0db9-4959-a685-bed3c5382fa8") : error deleting /var/lib/kubelet/pods/7e6e1e12-0db9-4959-a685-bed3c5382fa8/volume-subpaths: remove /var/lib/kubelet/pods/7e6e1e12-0db9-4959-a685-bed3c5382fa8/volume-subpaths: no such file or directory Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.437619 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7e6e1e12-0db9-4959-a685-bed3c5382fa8" (UID: "7e6e1e12-0db9-4959-a685-bed3c5382fa8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.445499 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztch8\" (UniqueName: \"kubernetes.io/projected/7e6e1e12-0db9-4959-a685-bed3c5382fa8-kube-api-access-ztch8\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.445563 4793 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.445579 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.445595 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.445606 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.582356 4793 generic.go:334] "Generic (PLEG): container finished" podID="7e6e1e12-0db9-4959-a685-bed3c5382fa8" containerID="178931f45dcf21bed8aecb4d90a62095e8c51d7087de094e451fcfcdb1aa718f" exitCode=0 Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.582459 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.582510 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" event={"ID":"7e6e1e12-0db9-4959-a685-bed3c5382fa8","Type":"ContainerDied","Data":"178931f45dcf21bed8aecb4d90a62095e8c51d7087de094e451fcfcdb1aa718f"} Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.582564 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57459cb9f9-4xdq5" event={"ID":"7e6e1e12-0db9-4959-a685-bed3c5382fa8","Type":"ContainerDied","Data":"e04cfba649278389579f720a7c6da9d15dceb178f1ddac31750f0dd790cb949c"} Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.582584 4793 scope.go:117] "RemoveContainer" containerID="178931f45dcf21bed8aecb4d90a62095e8c51d7087de094e451fcfcdb1aa718f" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.607337 4793 scope.go:117] "RemoveContainer" containerID="d9b74e9fb3bb54a7f5cfe46754f70a091508a9869c1c54a5bea363864b268b2c" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.631186 4793 scope.go:117] "RemoveContainer" containerID="178931f45dcf21bed8aecb4d90a62095e8c51d7087de094e451fcfcdb1aa718f" Jan 27 20:29:05 crc kubenswrapper[4793]: E0127 20:29:05.632702 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"178931f45dcf21bed8aecb4d90a62095e8c51d7087de094e451fcfcdb1aa718f\": container with ID starting with 178931f45dcf21bed8aecb4d90a62095e8c51d7087de094e451fcfcdb1aa718f not found: ID does not exist" containerID="178931f45dcf21bed8aecb4d90a62095e8c51d7087de094e451fcfcdb1aa718f" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.632743 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"178931f45dcf21bed8aecb4d90a62095e8c51d7087de094e451fcfcdb1aa718f"} err="failed to get container status \"178931f45dcf21bed8aecb4d90a62095e8c51d7087de094e451fcfcdb1aa718f\": rpc error: code = NotFound desc = could not find container \"178931f45dcf21bed8aecb4d90a62095e8c51d7087de094e451fcfcdb1aa718f\": container with ID starting with 178931f45dcf21bed8aecb4d90a62095e8c51d7087de094e451fcfcdb1aa718f not found: ID does not exist" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.632774 4793 scope.go:117] "RemoveContainer" containerID="d9b74e9fb3bb54a7f5cfe46754f70a091508a9869c1c54a5bea363864b268b2c" Jan 27 20:29:05 crc kubenswrapper[4793]: E0127 20:29:05.633239 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9b74e9fb3bb54a7f5cfe46754f70a091508a9869c1c54a5bea363864b268b2c\": container with ID starting with d9b74e9fb3bb54a7f5cfe46754f70a091508a9869c1c54a5bea363864b268b2c not found: ID does not exist" containerID="d9b74e9fb3bb54a7f5cfe46754f70a091508a9869c1c54a5bea363864b268b2c" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.633277 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9b74e9fb3bb54a7f5cfe46754f70a091508a9869c1c54a5bea363864b268b2c"} err="failed to get container status \"d9b74e9fb3bb54a7f5cfe46754f70a091508a9869c1c54a5bea363864b268b2c\": rpc error: code = NotFound desc = could not find container \"d9b74e9fb3bb54a7f5cfe46754f70a091508a9869c1c54a5bea363864b268b2c\": container with ID starting with d9b74e9fb3bb54a7f5cfe46754f70a091508a9869c1c54a5bea363864b268b2c not found: ID does not exist" Jan 27 20:29:05 crc 
kubenswrapper[4793]: I0127 20:29:05.955975 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-config\") pod \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\" (UID: \"7e6e1e12-0db9-4959-a685-bed3c5382fa8\") " Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.956623 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-config" (OuterVolumeSpecName: "config") pod "7e6e1e12-0db9-4959-a685-bed3c5382fa8" (UID: "7e6e1e12-0db9-4959-a685-bed3c5382fa8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:29:05 crc kubenswrapper[4793]: I0127 20:29:05.957007 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e6e1e12-0db9-4959-a685-bed3c5382fa8-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:06 crc kubenswrapper[4793]: I0127 20:29:06.261597 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57459cb9f9-4xdq5"] Jan 27 20:29:06 crc kubenswrapper[4793]: I0127 20:29:06.273814 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57459cb9f9-4xdq5"] Jan 27 20:29:06 crc kubenswrapper[4793]: I0127 20:29:06.593741 4793 generic.go:334] "Generic (PLEG): container finished" podID="6cf3de60-1232-4828-b1a7-77e1f483bfff" containerID="3912a4d44e5f4f2e1539bb1f0a7a5ecd887fe2a897d7a2c82a811d6f959032c3" exitCode=0 Jan 27 20:29:06 crc kubenswrapper[4793]: I0127 20:29:06.593812 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-hszbk" event={"ID":"6cf3de60-1232-4828-b1a7-77e1f483bfff","Type":"ContainerDied","Data":"3912a4d44e5f4f2e1539bb1f0a7a5ecd887fe2a897d7a2c82a811d6f959032c3"} Jan 27 20:29:06 crc kubenswrapper[4793]: I0127 20:29:06.972021 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 27 20:29:06 crc kubenswrapper[4793]: I0127 20:29:06.972074 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 27 20:29:07 crc kubenswrapper[4793]: I0127 20:29:07.818597 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e6e1e12-0db9-4959-a685-bed3c5382fa8" path="/var/lib/kubelet/pods/7e6e1e12-0db9-4959-a685-bed3c5382fa8/volumes" Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.055188 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-hszbk" Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.235542 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-combined-ca-bundle\") pod \"6cf3de60-1232-4828-b1a7-77e1f483bfff\" (UID: \"6cf3de60-1232-4828-b1a7-77e1f483bfff\") " Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.235852 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-664rj\" (UniqueName: \"kubernetes.io/projected/6cf3de60-1232-4828-b1a7-77e1f483bfff-kube-api-access-664rj\") pod \"6cf3de60-1232-4828-b1a7-77e1f483bfff\" (UID: \"6cf3de60-1232-4828-b1a7-77e1f483bfff\") " Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.236095 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-config-data\") pod \"6cf3de60-1232-4828-b1a7-77e1f483bfff\" (UID: \"6cf3de60-1232-4828-b1a7-77e1f483bfff\") " Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.236139 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-scripts\") pod \"6cf3de60-1232-4828-b1a7-77e1f483bfff\" (UID: \"6cf3de60-1232-4828-b1a7-77e1f483bfff\") " Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.241235 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-scripts" (OuterVolumeSpecName: "scripts") pod "6cf3de60-1232-4828-b1a7-77e1f483bfff" (UID: "6cf3de60-1232-4828-b1a7-77e1f483bfff"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.241671 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cf3de60-1232-4828-b1a7-77e1f483bfff-kube-api-access-664rj" (OuterVolumeSpecName: "kube-api-access-664rj") pod "6cf3de60-1232-4828-b1a7-77e1f483bfff" (UID: "6cf3de60-1232-4828-b1a7-77e1f483bfff"). InnerVolumeSpecName "kube-api-access-664rj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.266262 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6cf3de60-1232-4828-b1a7-77e1f483bfff" (UID: "6cf3de60-1232-4828-b1a7-77e1f483bfff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.277429 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-config-data" (OuterVolumeSpecName: "config-data") pod "6cf3de60-1232-4828-b1a7-77e1f483bfff" (UID: "6cf3de60-1232-4828-b1a7-77e1f483bfff"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.339143 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-664rj\" (UniqueName: \"kubernetes.io/projected/6cf3de60-1232-4828-b1a7-77e1f483bfff-kube-api-access-664rj\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.339198 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.339211 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.339223 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cf3de60-1232-4828-b1a7-77e1f483bfff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.643476 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-hszbk" event={"ID":"6cf3de60-1232-4828-b1a7-77e1f483bfff","Type":"ContainerDied","Data":"1c09d50621dff25a210ff9a80899567f40eca03cbabab48048d1f9244c350ae9"} Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.643796 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-hszbk" Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.643876 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c09d50621dff25a210ff9a80899567f40eca03cbabab48048d1f9244c350ae9" Jan 27 20:29:08 crc kubenswrapper[4793]: E0127 20:29:08.753588 4793 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cf3de60_1232_4828_b1a7_77e1f483bfff.slice/crio-1c09d50621dff25a210ff9a80899567f40eca03cbabab48048d1f9244c350ae9\": RecentStats: unable to find data in memory cache]" Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.954230 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.954475 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4f944650-8c63-483f-9b8a-2ced512318e6" containerName="nova-api-log" containerID="cri-o://42d47182f74e0c300737bb85f3cf958bd659f616b08c6b449561c5c419cce0c0" gracePeriod=30 Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.955113 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4f944650-8c63-483f-9b8a-2ced512318e6" containerName="nova-api-api" containerID="cri-o://88a1cf4917b676d7beb77522ba61e7ad0b4d052038aa5eb3179bc10305d6e023" gracePeriod=30 Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.985364 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 20:29:08 crc kubenswrapper[4793]: I0127 20:29:08.985654 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="f6a4bc6a-42d5-4be9-b35f-d042d71c146f" containerName="nova-scheduler-scheduler" 
containerID="cri-o://6940b90ac3c7b47d73edeb00022206242e6239dab26ce9b30f9557e9672ffa13" gracePeriod=30 Jan 27 20:29:09 crc kubenswrapper[4793]: I0127 20:29:09.171005 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:29:09 crc kubenswrapper[4793]: I0127 20:29:09.171501 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="4161b850-cbb0-4116-abbc-5889d260a955" containerName="nova-metadata-log" containerID="cri-o://64daf0dde0e1a7e72f79b427a9b45cefb3ec7d949135e1e3676e214b00d13589" gracePeriod=30 Jan 27 20:29:09 crc kubenswrapper[4793]: I0127 20:29:09.171649 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="4161b850-cbb0-4116-abbc-5889d260a955" containerName="nova-metadata-metadata" containerID="cri-o://a01cba44401102c69b05a1cb804d2d35fd0ebd1eca80440e7dbc3cddb0598b1a" gracePeriod=30 Jan 27 20:29:09 crc kubenswrapper[4793]: E0127 20:29:09.208740 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6940b90ac3c7b47d73edeb00022206242e6239dab26ce9b30f9557e9672ffa13" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 27 20:29:09 crc kubenswrapper[4793]: E0127 20:29:09.209981 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6940b90ac3c7b47d73edeb00022206242e6239dab26ce9b30f9557e9672ffa13" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 27 20:29:09 crc kubenswrapper[4793]: E0127 20:29:09.211155 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6940b90ac3c7b47d73edeb00022206242e6239dab26ce9b30f9557e9672ffa13" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 27 20:29:09 crc kubenswrapper[4793]: E0127 20:29:09.211195 4793 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="f6a4bc6a-42d5-4be9-b35f-d042d71c146f" containerName="nova-scheduler-scheduler" Jan 27 20:29:09 crc kubenswrapper[4793]: I0127 20:29:09.661215 4793 generic.go:334] "Generic (PLEG): container finished" podID="4f944650-8c63-483f-9b8a-2ced512318e6" containerID="42d47182f74e0c300737bb85f3cf958bd659f616b08c6b449561c5c419cce0c0" exitCode=143 Jan 27 20:29:09 crc kubenswrapper[4793]: I0127 20:29:09.661774 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4f944650-8c63-483f-9b8a-2ced512318e6","Type":"ContainerDied","Data":"42d47182f74e0c300737bb85f3cf958bd659f616b08c6b449561c5c419cce0c0"} Jan 27 20:29:09 crc kubenswrapper[4793]: I0127 20:29:09.664339 4793 generic.go:334] "Generic (PLEG): container finished" podID="4161b850-cbb0-4116-abbc-5889d260a955" containerID="a01cba44401102c69b05a1cb804d2d35fd0ebd1eca80440e7dbc3cddb0598b1a" exitCode=0 Jan 27 20:29:09 crc kubenswrapper[4793]: I0127 20:29:09.664675 4793 generic.go:334] "Generic (PLEG): container finished" podID="4161b850-cbb0-4116-abbc-5889d260a955" 
containerID="64daf0dde0e1a7e72f79b427a9b45cefb3ec7d949135e1e3676e214b00d13589" exitCode=143 Jan 27 20:29:09 crc kubenswrapper[4793]: I0127 20:29:09.664796 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4161b850-cbb0-4116-abbc-5889d260a955","Type":"ContainerDied","Data":"a01cba44401102c69b05a1cb804d2d35fd0ebd1eca80440e7dbc3cddb0598b1a"} Jan 27 20:29:09 crc kubenswrapper[4793]: I0127 20:29:09.665026 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4161b850-cbb0-4116-abbc-5889d260a955","Type":"ContainerDied","Data":"64daf0dde0e1a7e72f79b427a9b45cefb3ec7d949135e1e3676e214b00d13589"} Jan 27 20:29:10 crc kubenswrapper[4793]: I0127 20:29:10.011359 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 20:29:10 crc kubenswrapper[4793]: I0127 20:29:10.130783 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-config-data\") pod \"4161b850-cbb0-4116-abbc-5889d260a955\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " Jan 27 20:29:10 crc kubenswrapper[4793]: I0127 20:29:10.130995 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-nova-metadata-tls-certs\") pod \"4161b850-cbb0-4116-abbc-5889d260a955\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " Jan 27 20:29:10 crc kubenswrapper[4793]: I0127 20:29:10.131131 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvzxc\" (UniqueName: \"kubernetes.io/projected/4161b850-cbb0-4116-abbc-5889d260a955-kube-api-access-mvzxc\") pod \"4161b850-cbb0-4116-abbc-5889d260a955\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " Jan 27 20:29:10 crc kubenswrapper[4793]: I0127 20:29:10.131168 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-combined-ca-bundle\") pod \"4161b850-cbb0-4116-abbc-5889d260a955\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " Jan 27 20:29:10 crc kubenswrapper[4793]: I0127 20:29:10.131288 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4161b850-cbb0-4116-abbc-5889d260a955-logs\") pod \"4161b850-cbb0-4116-abbc-5889d260a955\" (UID: \"4161b850-cbb0-4116-abbc-5889d260a955\") " Jan 27 20:29:10 crc kubenswrapper[4793]: I0127 20:29:10.132167 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4161b850-cbb0-4116-abbc-5889d260a955-logs" (OuterVolumeSpecName: "logs") pod "4161b850-cbb0-4116-abbc-5889d260a955" (UID: "4161b850-cbb0-4116-abbc-5889d260a955"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:29:10 crc kubenswrapper[4793]: I0127 20:29:10.137394 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4161b850-cbb0-4116-abbc-5889d260a955-kube-api-access-mvzxc" (OuterVolumeSpecName: "kube-api-access-mvzxc") pod "4161b850-cbb0-4116-abbc-5889d260a955" (UID: "4161b850-cbb0-4116-abbc-5889d260a955"). InnerVolumeSpecName "kube-api-access-mvzxc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:29:10 crc kubenswrapper[4793]: I0127 20:29:10.160972 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-config-data" (OuterVolumeSpecName: "config-data") pod "4161b850-cbb0-4116-abbc-5889d260a955" (UID: "4161b850-cbb0-4116-abbc-5889d260a955"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:10 crc kubenswrapper[4793]: I0127 20:29:10.161364 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4161b850-cbb0-4116-abbc-5889d260a955" (UID: "4161b850-cbb0-4116-abbc-5889d260a955"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:10 crc kubenswrapper[4793]: I0127 20:29:10.211890 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "4161b850-cbb0-4116-abbc-5889d260a955" (UID: "4161b850-cbb0-4116-abbc-5889d260a955"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:10 crc kubenswrapper[4793]: I0127 20:29:10.233866 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvzxc\" (UniqueName: \"kubernetes.io/projected/4161b850-cbb0-4116-abbc-5889d260a955-kube-api-access-mvzxc\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:10 crc kubenswrapper[4793]: I0127 20:29:10.233914 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:10 crc kubenswrapper[4793]: I0127 20:29:10.233927 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4161b850-cbb0-4116-abbc-5889d260a955-logs\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:10 crc kubenswrapper[4793]: I0127 20:29:10.233940 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:10 crc kubenswrapper[4793]: I0127 20:29:10.233951 4793 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4161b850-cbb0-4116-abbc-5889d260a955-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.183236 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.216889 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4161b850-cbb0-4116-abbc-5889d260a955","Type":"ContainerDied","Data":"73c5aa884bbdaaa20c148cf685c54198d2c4b83eb4dda69ec09e06d8d748c77d"} Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.216958 4793 scope.go:117] "RemoveContainer" containerID="a01cba44401102c69b05a1cb804d2d35fd0ebd1eca80440e7dbc3cddb0598b1a" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.217130 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.293301 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.315348 4793 scope.go:117] "RemoveContainer" containerID="64daf0dde0e1a7e72f79b427a9b45cefb3ec7d949135e1e3676e214b00d13589" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.327864 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.340947 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:29:11 crc kubenswrapper[4793]: E0127 20:29:11.341442 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cf3de60-1232-4828-b1a7-77e1f483bfff" containerName="nova-manage" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.341457 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cf3de60-1232-4828-b1a7-77e1f483bfff" containerName="nova-manage" Jan 27 20:29:11 crc kubenswrapper[4793]: E0127 20:29:11.341468 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e6e1e12-0db9-4959-a685-bed3c5382fa8" containerName="init" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.341473 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e6e1e12-0db9-4959-a685-bed3c5382fa8" containerName="init" Jan 27 20:29:11 crc kubenswrapper[4793]: E0127 20:29:11.341500 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4161b850-cbb0-4116-abbc-5889d260a955" containerName="nova-metadata-metadata" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.341508 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4161b850-cbb0-4116-abbc-5889d260a955" containerName="nova-metadata-metadata" Jan 27 20:29:11 crc kubenswrapper[4793]: E0127 20:29:11.341527 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e6e1e12-0db9-4959-a685-bed3c5382fa8" containerName="dnsmasq-dns" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.341532 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e6e1e12-0db9-4959-a685-bed3c5382fa8" containerName="dnsmasq-dns" Jan 27 20:29:11 crc kubenswrapper[4793]: E0127 20:29:11.341559 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4161b850-cbb0-4116-abbc-5889d260a955" containerName="nova-metadata-log" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.341565 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4161b850-cbb0-4116-abbc-5889d260a955" containerName="nova-metadata-log" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.341761 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4161b850-cbb0-4116-abbc-5889d260a955" containerName="nova-metadata-log" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.341774 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4161b850-cbb0-4116-abbc-5889d260a955" containerName="nova-metadata-metadata" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.341784 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cf3de60-1232-4828-b1a7-77e1f483bfff" containerName="nova-manage" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.341800 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e6e1e12-0db9-4959-a685-bed3c5382fa8" containerName="dnsmasq-dns" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.342910 4793 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.347052 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.347129 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.347930 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.450778 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6ea88dd-da8a-4657-8785-79b5d30c14e9-logs\") pod \"nova-metadata-0\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.451140 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.451347 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.451458 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9cz4\" (UniqueName: \"kubernetes.io/projected/e6ea88dd-da8a-4657-8785-79b5d30c14e9-kube-api-access-t9cz4\") pod \"nova-metadata-0\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.451650 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-config-data\") pod \"nova-metadata-0\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.553599 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6ea88dd-da8a-4657-8785-79b5d30c14e9-logs\") pod \"nova-metadata-0\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.553682 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.553739 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") 
" pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.553776 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9cz4\" (UniqueName: \"kubernetes.io/projected/e6ea88dd-da8a-4657-8785-79b5d30c14e9-kube-api-access-t9cz4\") pod \"nova-metadata-0\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.553885 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-config-data\") pod \"nova-metadata-0\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.559165 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6ea88dd-da8a-4657-8785-79b5d30c14e9-logs\") pod \"nova-metadata-0\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.562647 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.562672 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.566351 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-config-data\") pod \"nova-metadata-0\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.577805 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9cz4\" (UniqueName: \"kubernetes.io/projected/e6ea88dd-da8a-4657-8785-79b5d30c14e9-kube-api-access-t9cz4\") pod \"nova-metadata-0\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.674562 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.700780 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.757777 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f944650-8c63-483f-9b8a-2ced512318e6-combined-ca-bundle\") pod \"4f944650-8c63-483f-9b8a-2ced512318e6\" (UID: \"4f944650-8c63-483f-9b8a-2ced512318e6\") " Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.757998 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f944650-8c63-483f-9b8a-2ced512318e6-config-data\") pod \"4f944650-8c63-483f-9b8a-2ced512318e6\" (UID: \"4f944650-8c63-483f-9b8a-2ced512318e6\") " Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.758036 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f944650-8c63-483f-9b8a-2ced512318e6-logs\") pod \"4f944650-8c63-483f-9b8a-2ced512318e6\" (UID: \"4f944650-8c63-483f-9b8a-2ced512318e6\") " Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.758169 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbg82\" (UniqueName: \"kubernetes.io/projected/4f944650-8c63-483f-9b8a-2ced512318e6-kube-api-access-kbg82\") pod \"4f944650-8c63-483f-9b8a-2ced512318e6\" (UID: \"4f944650-8c63-483f-9b8a-2ced512318e6\") " Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.759215 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f944650-8c63-483f-9b8a-2ced512318e6-logs" (OuterVolumeSpecName: "logs") pod "4f944650-8c63-483f-9b8a-2ced512318e6" (UID: "4f944650-8c63-483f-9b8a-2ced512318e6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.761299 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f944650-8c63-483f-9b8a-2ced512318e6-logs\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.763016 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f944650-8c63-483f-9b8a-2ced512318e6-kube-api-access-kbg82" (OuterVolumeSpecName: "kube-api-access-kbg82") pod "4f944650-8c63-483f-9b8a-2ced512318e6" (UID: "4f944650-8c63-483f-9b8a-2ced512318e6"). InnerVolumeSpecName "kube-api-access-kbg82". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.786874 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f944650-8c63-483f-9b8a-2ced512318e6-config-data" (OuterVolumeSpecName: "config-data") pod "4f944650-8c63-483f-9b8a-2ced512318e6" (UID: "4f944650-8c63-483f-9b8a-2ced512318e6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.933597 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbg82\" (UniqueName: \"kubernetes.io/projected/4f944650-8c63-483f-9b8a-2ced512318e6-kube-api-access-kbg82\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.933630 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f944650-8c63-483f-9b8a-2ced512318e6-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.938988 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f944650-8c63-483f-9b8a-2ced512318e6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4f944650-8c63-483f-9b8a-2ced512318e6" (UID: "4f944650-8c63-483f-9b8a-2ced512318e6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:11 crc kubenswrapper[4793]: I0127 20:29:11.969295 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4161b850-cbb0-4116-abbc-5889d260a955" path="/var/lib/kubelet/pods/4161b850-cbb0-4116-abbc-5889d260a955/volumes" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.037373 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f944650-8c63-483f-9b8a-2ced512318e6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.232129 4793 generic.go:334] "Generic (PLEG): container finished" podID="4f944650-8c63-483f-9b8a-2ced512318e6" containerID="88a1cf4917b676d7beb77522ba61e7ad0b4d052038aa5eb3179bc10305d6e023" exitCode=0 Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.232184 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4f944650-8c63-483f-9b8a-2ced512318e6","Type":"ContainerDied","Data":"88a1cf4917b676d7beb77522ba61e7ad0b4d052038aa5eb3179bc10305d6e023"} Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.232216 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4f944650-8c63-483f-9b8a-2ced512318e6","Type":"ContainerDied","Data":"129fc44a5d94995a16b14fc61c70cb91e584b697a40037d5d9e91b3fde493701"} Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.232237 4793 scope.go:117] "RemoveContainer" containerID="88a1cf4917b676d7beb77522ba61e7ad0b4d052038aa5eb3179bc10305d6e023" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.232372 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.260187 4793 scope.go:117] "RemoveContainer" containerID="42d47182f74e0c300737bb85f3cf958bd659f616b08c6b449561c5c419cce0c0" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.282983 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.306099 4793 scope.go:117] "RemoveContainer" containerID="88a1cf4917b676d7beb77522ba61e7ad0b4d052038aa5eb3179bc10305d6e023" Jan 27 20:29:12 crc kubenswrapper[4793]: E0127 20:29:12.309727 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88a1cf4917b676d7beb77522ba61e7ad0b4d052038aa5eb3179bc10305d6e023\": container with ID starting with 88a1cf4917b676d7beb77522ba61e7ad0b4d052038aa5eb3179bc10305d6e023 not found: ID does not exist" containerID="88a1cf4917b676d7beb77522ba61e7ad0b4d052038aa5eb3179bc10305d6e023" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.309783 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88a1cf4917b676d7beb77522ba61e7ad0b4d052038aa5eb3179bc10305d6e023"} err="failed to get container status \"88a1cf4917b676d7beb77522ba61e7ad0b4d052038aa5eb3179bc10305d6e023\": rpc error: code = NotFound desc = could not find container \"88a1cf4917b676d7beb77522ba61e7ad0b4d052038aa5eb3179bc10305d6e023\": container with ID starting with 88a1cf4917b676d7beb77522ba61e7ad0b4d052038aa5eb3179bc10305d6e023 not found: ID does not exist" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.309812 4793 scope.go:117] "RemoveContainer" containerID="42d47182f74e0c300737bb85f3cf958bd659f616b08c6b449561c5c419cce0c0" Jan 27 20:29:12 crc kubenswrapper[4793]: E0127 20:29:12.313557 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42d47182f74e0c300737bb85f3cf958bd659f616b08c6b449561c5c419cce0c0\": container with ID starting with 42d47182f74e0c300737bb85f3cf958bd659f616b08c6b449561c5c419cce0c0 not found: ID does not exist" containerID="42d47182f74e0c300737bb85f3cf958bd659f616b08c6b449561c5c419cce0c0" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.313611 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42d47182f74e0c300737bb85f3cf958bd659f616b08c6b449561c5c419cce0c0"} err="failed to get container status \"42d47182f74e0c300737bb85f3cf958bd659f616b08c6b449561c5c419cce0c0\": rpc error: code = NotFound desc = could not find container \"42d47182f74e0c300737bb85f3cf958bd659f616b08c6b449561c5c419cce0c0\": container with ID starting with 42d47182f74e0c300737bb85f3cf958bd659f616b08c6b449561c5c419cce0c0 not found: ID does not exist" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.313672 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.329405 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 27 20:29:12 crc kubenswrapper[4793]: E0127 20:29:12.329984 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f944650-8c63-483f-9b8a-2ced512318e6" containerName="nova-api-log" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.330002 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f944650-8c63-483f-9b8a-2ced512318e6" containerName="nova-api-log" Jan 27 20:29:12 crc 
kubenswrapper[4793]: E0127 20:29:12.330014 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f944650-8c63-483f-9b8a-2ced512318e6" containerName="nova-api-api" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.330020 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f944650-8c63-483f-9b8a-2ced512318e6" containerName="nova-api-api" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.330215 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f944650-8c63-483f-9b8a-2ced512318e6" containerName="nova-api-log" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.330238 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f944650-8c63-483f-9b8a-2ced512318e6" containerName="nova-api-api" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.331402 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.334906 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.340672 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 27 20:29:12 crc kubenswrapper[4793]: W0127 20:29:12.341382 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6ea88dd_da8a_4657_8785_79b5d30c14e9.slice/crio-1cf4822ba61f1d0784df7b576acbe2eb039fead86d951eadb3eb35622fc7a416 WatchSource:0}: Error finding container 1cf4822ba61f1d0784df7b576acbe2eb039fead86d951eadb3eb35622fc7a416: Status 404 returned error can't find the container with id 1cf4822ba61f1d0784df7b576acbe2eb039fead86d951eadb3eb35622fc7a416 Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.356095 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.452787 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d75e0794-645e-437f-8553-777f0883e298-config-data\") pod \"nova-api-0\" (UID: \"d75e0794-645e-437f-8553-777f0883e298\") " pod="openstack/nova-api-0" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.452911 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d75e0794-645e-437f-8553-777f0883e298-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d75e0794-645e-437f-8553-777f0883e298\") " pod="openstack/nova-api-0" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.452977 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l467w\" (UniqueName: \"kubernetes.io/projected/d75e0794-645e-437f-8553-777f0883e298-kube-api-access-l467w\") pod \"nova-api-0\" (UID: \"d75e0794-645e-437f-8553-777f0883e298\") " pod="openstack/nova-api-0" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.453084 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d75e0794-645e-437f-8553-777f0883e298-logs\") pod \"nova-api-0\" (UID: \"d75e0794-645e-437f-8553-777f0883e298\") " pod="openstack/nova-api-0" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.555102 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-l467w\" (UniqueName: \"kubernetes.io/projected/d75e0794-645e-437f-8553-777f0883e298-kube-api-access-l467w\") pod \"nova-api-0\" (UID: \"d75e0794-645e-437f-8553-777f0883e298\") " pod="openstack/nova-api-0" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.555244 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d75e0794-645e-437f-8553-777f0883e298-logs\") pod \"nova-api-0\" (UID: \"d75e0794-645e-437f-8553-777f0883e298\") " pod="openstack/nova-api-0" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.555352 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d75e0794-645e-437f-8553-777f0883e298-config-data\") pod \"nova-api-0\" (UID: \"d75e0794-645e-437f-8553-777f0883e298\") " pod="openstack/nova-api-0" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.555421 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d75e0794-645e-437f-8553-777f0883e298-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d75e0794-645e-437f-8553-777f0883e298\") " pod="openstack/nova-api-0" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.555974 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d75e0794-645e-437f-8553-777f0883e298-logs\") pod \"nova-api-0\" (UID: \"d75e0794-645e-437f-8553-777f0883e298\") " pod="openstack/nova-api-0" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.560512 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d75e0794-645e-437f-8553-777f0883e298-config-data\") pod \"nova-api-0\" (UID: \"d75e0794-645e-437f-8553-777f0883e298\") " pod="openstack/nova-api-0" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.562002 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d75e0794-645e-437f-8553-777f0883e298-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d75e0794-645e-437f-8553-777f0883e298\") " pod="openstack/nova-api-0" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.576113 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l467w\" (UniqueName: \"kubernetes.io/projected/d75e0794-645e-437f-8553-777f0883e298-kube-api-access-l467w\") pod \"nova-api-0\" (UID: \"d75e0794-645e-437f-8553-777f0883e298\") " pod="openstack/nova-api-0" Jan 27 20:29:12 crc kubenswrapper[4793]: I0127 20:29:12.866149 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 27 20:29:13 crc kubenswrapper[4793]: I0127 20:29:13.251073 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e6ea88dd-da8a-4657-8785-79b5d30c14e9","Type":"ContainerStarted","Data":"893639eaa533e9276b5f8f9d4cfe6aa32461ca5d9e7224615d38299efb3ed1ec"} Jan 27 20:29:13 crc kubenswrapper[4793]: I0127 20:29:13.251401 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e6ea88dd-da8a-4657-8785-79b5d30c14e9","Type":"ContainerStarted","Data":"76d975de65f09e3d1d3dd72caddb10aee288896ba6f01a46cc189dcf041cc962"} Jan 27 20:29:13 crc kubenswrapper[4793]: I0127 20:29:13.251417 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e6ea88dd-da8a-4657-8785-79b5d30c14e9","Type":"ContainerStarted","Data":"1cf4822ba61f1d0784df7b576acbe2eb039fead86d951eadb3eb35622fc7a416"} Jan 27 20:29:13 crc kubenswrapper[4793]: I0127 20:29:13.261308 4793 generic.go:334] "Generic (PLEG): container finished" podID="f6a4bc6a-42d5-4be9-b35f-d042d71c146f" containerID="6940b90ac3c7b47d73edeb00022206242e6239dab26ce9b30f9557e9672ffa13" exitCode=0 Jan 27 20:29:13 crc kubenswrapper[4793]: I0127 20:29:13.261349 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f6a4bc6a-42d5-4be9-b35f-d042d71c146f","Type":"ContainerDied","Data":"6940b90ac3c7b47d73edeb00022206242e6239dab26ce9b30f9557e9672ffa13"} Jan 27 20:29:13 crc kubenswrapper[4793]: W0127 20:29:13.287169 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd75e0794_645e_437f_8553_777f0883e298.slice/crio-a8e52973511ddb0f712592328e0077e1388adc0b4f3d9ad462674b22257012bb WatchSource:0}: Error finding container a8e52973511ddb0f712592328e0077e1388adc0b4f3d9ad462674b22257012bb: Status 404 returned error can't find the container with id a8e52973511ddb0f712592328e0077e1388adc0b4f3d9ad462674b22257012bb Jan 27 20:29:13 crc kubenswrapper[4793]: I0127 20:29:13.295624 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 27 20:29:13 crc kubenswrapper[4793]: I0127 20:29:13.296856 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.296830265 podStartE2EDuration="2.296830265s" podCreationTimestamp="2026-01-27 20:29:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:29:13.27748577 +0000 UTC m=+1578.667738926" watchObservedRunningTime="2026-01-27 20:29:13.296830265 +0000 UTC m=+1578.687083431" Jan 27 20:29:13 crc kubenswrapper[4793]: I0127 20:29:13.686873 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 20:29:13 crc kubenswrapper[4793]: I0127 20:29:13.953817 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-config-data\") pod \"f6a4bc6a-42d5-4be9-b35f-d042d71c146f\" (UID: \"f6a4bc6a-42d5-4be9-b35f-d042d71c146f\") " Jan 27 20:29:13 crc kubenswrapper[4793]: I0127 20:29:13.954238 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vkgd\" (UniqueName: \"kubernetes.io/projected/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-kube-api-access-2vkgd\") pod \"f6a4bc6a-42d5-4be9-b35f-d042d71c146f\" (UID: \"f6a4bc6a-42d5-4be9-b35f-d042d71c146f\") " Jan 27 20:29:13 crc kubenswrapper[4793]: I0127 20:29:13.954355 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-combined-ca-bundle\") pod \"f6a4bc6a-42d5-4be9-b35f-d042d71c146f\" (UID: \"f6a4bc6a-42d5-4be9-b35f-d042d71c146f\") " Jan 27 20:29:13 crc kubenswrapper[4793]: I0127 20:29:13.964596 4793 scope.go:117] "RemoveContainer" containerID="97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e" Jan 27 20:29:13 crc kubenswrapper[4793]: E0127 20:29:13.964838 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:29:13 crc kubenswrapper[4793]: I0127 20:29:13.981790 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-kube-api-access-2vkgd" (OuterVolumeSpecName: "kube-api-access-2vkgd") pod "f6a4bc6a-42d5-4be9-b35f-d042d71c146f" (UID: "f6a4bc6a-42d5-4be9-b35f-d042d71c146f"). InnerVolumeSpecName "kube-api-access-2vkgd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:29:13 crc kubenswrapper[4793]: I0127 20:29:13.984138 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f944650-8c63-483f-9b8a-2ced512318e6" path="/var/lib/kubelet/pods/4f944650-8c63-483f-9b8a-2ced512318e6/volumes" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.016604 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f6a4bc6a-42d5-4be9-b35f-d042d71c146f" (UID: "f6a4bc6a-42d5-4be9-b35f-d042d71c146f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.057751 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vkgd\" (UniqueName: \"kubernetes.io/projected/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-kube-api-access-2vkgd\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.057787 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.059031 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-config-data" (OuterVolumeSpecName: "config-data") pod "f6a4bc6a-42d5-4be9-b35f-d042d71c146f" (UID: "f6a4bc6a-42d5-4be9-b35f-d042d71c146f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.159318 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6a4bc6a-42d5-4be9-b35f-d042d71c146f-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.301158 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d75e0794-645e-437f-8553-777f0883e298","Type":"ContainerStarted","Data":"a591e0b22de91d59fd5aac436215d200adfc319ccd040e366e61500c741ac2cc"} Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.301214 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d75e0794-645e-437f-8553-777f0883e298","Type":"ContainerStarted","Data":"acce7e97a6c48b77432af0aa29efe1dcf05c2c403f7cbda454626bce370851b8"} Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.301229 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d75e0794-645e-437f-8553-777f0883e298","Type":"ContainerStarted","Data":"a8e52973511ddb0f712592328e0077e1388adc0b4f3d9ad462674b22257012bb"} Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.306589 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.308763 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f6a4bc6a-42d5-4be9-b35f-d042d71c146f","Type":"ContainerDied","Data":"fe5a8c70213639dbde8fec1639b78ca1801e96756a90733c5873b5b53d9f899a"} Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.308908 4793 scope.go:117] "RemoveContainer" containerID="6940b90ac3c7b47d73edeb00022206242e6239dab26ce9b30f9557e9672ffa13" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.335831 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.335810909 podStartE2EDuration="2.335810909s" podCreationTimestamp="2026-01-27 20:29:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:29:14.329470661 +0000 UTC m=+1579.719723817" watchObservedRunningTime="2026-01-27 20:29:14.335810909 +0000 UTC m=+1579.726064065" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.383618 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.393700 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.405379 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 20:29:14 crc kubenswrapper[4793]: E0127 20:29:14.406053 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6a4bc6a-42d5-4be9-b35f-d042d71c146f" containerName="nova-scheduler-scheduler" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.406073 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6a4bc6a-42d5-4be9-b35f-d042d71c146f" containerName="nova-scheduler-scheduler" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.406321 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6a4bc6a-42d5-4be9-b35f-d042d71c146f" containerName="nova-scheduler-scheduler" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.407233 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.409296 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.415280 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.474982 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqlg9\" (UniqueName: \"kubernetes.io/projected/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-kube-api-access-pqlg9\") pod \"nova-scheduler-0\" (UID: \"9c0bda9f-a25e-4618-9a84-a2393d93a9e9\") " pod="openstack/nova-scheduler-0" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.475322 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-config-data\") pod \"nova-scheduler-0\" (UID: \"9c0bda9f-a25e-4618-9a84-a2393d93a9e9\") " pod="openstack/nova-scheduler-0" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.475451 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9c0bda9f-a25e-4618-9a84-a2393d93a9e9\") " pod="openstack/nova-scheduler-0" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.577642 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-config-data\") pod \"nova-scheduler-0\" (UID: \"9c0bda9f-a25e-4618-9a84-a2393d93a9e9\") " pod="openstack/nova-scheduler-0" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.577773 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9c0bda9f-a25e-4618-9a84-a2393d93a9e9\") " pod="openstack/nova-scheduler-0" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.577912 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqlg9\" (UniqueName: \"kubernetes.io/projected/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-kube-api-access-pqlg9\") pod \"nova-scheduler-0\" (UID: \"9c0bda9f-a25e-4618-9a84-a2393d93a9e9\") " pod="openstack/nova-scheduler-0" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.584220 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-config-data\") pod \"nova-scheduler-0\" (UID: \"9c0bda9f-a25e-4618-9a84-a2393d93a9e9\") " pod="openstack/nova-scheduler-0" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.597602 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9c0bda9f-a25e-4618-9a84-a2393d93a9e9\") " pod="openstack/nova-scheduler-0" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.604262 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqlg9\" (UniqueName: 
\"kubernetes.io/projected/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-kube-api-access-pqlg9\") pod \"nova-scheduler-0\" (UID: \"9c0bda9f-a25e-4618-9a84-a2393d93a9e9\") " pod="openstack/nova-scheduler-0" Jan 27 20:29:14 crc kubenswrapper[4793]: I0127 20:29:14.741215 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 20:29:15 crc kubenswrapper[4793]: I0127 20:29:15.595084 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 20:29:15 crc kubenswrapper[4793]: I0127 20:29:15.817033 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6a4bc6a-42d5-4be9-b35f-d042d71c146f" path="/var/lib/kubelet/pods/f6a4bc6a-42d5-4be9-b35f-d042d71c146f/volumes" Jan 27 20:29:16 crc kubenswrapper[4793]: I0127 20:29:16.343420 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9c0bda9f-a25e-4618-9a84-a2393d93a9e9","Type":"ContainerStarted","Data":"a52da84c917d962fd70177dbb08fa298466cf9bfa55dca7bf3903dd1100ecf15"} Jan 27 20:29:16 crc kubenswrapper[4793]: I0127 20:29:16.343469 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9c0bda9f-a25e-4618-9a84-a2393d93a9e9","Type":"ContainerStarted","Data":"fd142dc22f11af15c62d165bf7d7ed8799d002e6d23f4ee3753825d7c132584b"} Jan 27 20:29:16 crc kubenswrapper[4793]: I0127 20:29:16.367762 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.367733766 podStartE2EDuration="2.367733766s" podCreationTimestamp="2026-01-27 20:29:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:29:16.362754604 +0000 UTC m=+1581.753007770" watchObservedRunningTime="2026-01-27 20:29:16.367733766 +0000 UTC m=+1581.757986922" Jan 27 20:29:16 crc kubenswrapper[4793]: I0127 20:29:16.675372 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 27 20:29:16 crc kubenswrapper[4793]: I0127 20:29:16.675434 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 27 20:29:17 crc kubenswrapper[4793]: I0127 20:29:17.731371 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 27 20:29:17 crc kubenswrapper[4793]: I0127 20:29:17.732009 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="58b757ab-b790-4b71-888f-49d52dc5e80d" containerName="kube-state-metrics" containerID="cri-o://83b261d127c85db97c4dc8d55e89046cbb9ea938c774c6f8eb8579770e1768eb" gracePeriod=30 Jan 27 20:29:18 crc kubenswrapper[4793]: I0127 20:29:18.371863 4793 generic.go:334] "Generic (PLEG): container finished" podID="5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5" containerID="03a624663d3f8252c079e82c5c545675355c90afff8f16a92e4b98ca0e4ac650" exitCode=0 Jan 27 20:29:18 crc kubenswrapper[4793]: I0127 20:29:18.372165 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-8fwdb" event={"ID":"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5","Type":"ContainerDied","Data":"03a624663d3f8252c079e82c5c545675355c90afff8f16a92e4b98ca0e4ac650"} Jan 27 20:29:18 crc kubenswrapper[4793]: I0127 20:29:18.376918 4793 generic.go:334] "Generic (PLEG): container finished" podID="58b757ab-b790-4b71-888f-49d52dc5e80d" 
containerID="83b261d127c85db97c4dc8d55e89046cbb9ea938c774c6f8eb8579770e1768eb" exitCode=2 Jan 27 20:29:18 crc kubenswrapper[4793]: I0127 20:29:18.376960 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"58b757ab-b790-4b71-888f-49d52dc5e80d","Type":"ContainerDied","Data":"83b261d127c85db97c4dc8d55e89046cbb9ea938c774c6f8eb8579770e1768eb"} Jan 27 20:29:18 crc kubenswrapper[4793]: I0127 20:29:18.514438 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 27 20:29:18 crc kubenswrapper[4793]: I0127 20:29:18.715214 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwk9t\" (UniqueName: \"kubernetes.io/projected/58b757ab-b790-4b71-888f-49d52dc5e80d-kube-api-access-bwk9t\") pod \"58b757ab-b790-4b71-888f-49d52dc5e80d\" (UID: \"58b757ab-b790-4b71-888f-49d52dc5e80d\") " Jan 27 20:29:18 crc kubenswrapper[4793]: I0127 20:29:18.722209 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58b757ab-b790-4b71-888f-49d52dc5e80d-kube-api-access-bwk9t" (OuterVolumeSpecName: "kube-api-access-bwk9t") pod "58b757ab-b790-4b71-888f-49d52dc5e80d" (UID: "58b757ab-b790-4b71-888f-49d52dc5e80d"). InnerVolumeSpecName "kube-api-access-bwk9t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:29:18 crc kubenswrapper[4793]: I0127 20:29:18.817461 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwk9t\" (UniqueName: \"kubernetes.io/projected/58b757ab-b790-4b71-888f-49d52dc5e80d-kube-api-access-bwk9t\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.457940 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.457935 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"58b757ab-b790-4b71-888f-49d52dc5e80d","Type":"ContainerDied","Data":"e02baa07974a26fa24c1db4eddba5dd64c74e6f6ce79d96ff73373a40c9e27da"} Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.458067 4793 scope.go:117] "RemoveContainer" containerID="83b261d127c85db97c4dc8d55e89046cbb9ea938c774c6f8eb8579770e1768eb" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.551962 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.558811 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.576728 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 27 20:29:19 crc kubenswrapper[4793]: E0127 20:29:19.577295 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58b757ab-b790-4b71-888f-49d52dc5e80d" containerName="kube-state-metrics" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.577323 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="58b757ab-b790-4b71-888f-49d52dc5e80d" containerName="kube-state-metrics" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.577580 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="58b757ab-b790-4b71-888f-49d52dc5e80d" containerName="kube-state-metrics" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.578364 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.583218 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.587213 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.628682 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.651276 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/c524394c-23d4-4fb4-b41f-0b3151bae4d1-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"c524394c-23d4-4fb4-b41f-0b3151bae4d1\") " pod="openstack/kube-state-metrics-0" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.651358 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-974bs\" (UniqueName: \"kubernetes.io/projected/c524394c-23d4-4fb4-b41f-0b3151bae4d1-kube-api-access-974bs\") pod \"kube-state-metrics-0\" (UID: \"c524394c-23d4-4fb4-b41f-0b3151bae4d1\") " pod="openstack/kube-state-metrics-0" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.651452 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c524394c-23d4-4fb4-b41f-0b3151bae4d1-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"c524394c-23d4-4fb4-b41f-0b3151bae4d1\") " pod="openstack/kube-state-metrics-0" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.651518 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/c524394c-23d4-4fb4-b41f-0b3151bae4d1-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"c524394c-23d4-4fb4-b41f-0b3151bae4d1\") " pod="openstack/kube-state-metrics-0" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.741930 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.754042 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/c524394c-23d4-4fb4-b41f-0b3151bae4d1-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"c524394c-23d4-4fb4-b41f-0b3151bae4d1\") " pod="openstack/kube-state-metrics-0" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.754108 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-974bs\" (UniqueName: \"kubernetes.io/projected/c524394c-23d4-4fb4-b41f-0b3151bae4d1-kube-api-access-974bs\") pod \"kube-state-metrics-0\" (UID: \"c524394c-23d4-4fb4-b41f-0b3151bae4d1\") " pod="openstack/kube-state-metrics-0" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.754185 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c524394c-23d4-4fb4-b41f-0b3151bae4d1-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"c524394c-23d4-4fb4-b41f-0b3151bae4d1\") " pod="openstack/kube-state-metrics-0" Jan 27 20:29:19 crc 
kubenswrapper[4793]: I0127 20:29:19.754211 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/c524394c-23d4-4fb4-b41f-0b3151bae4d1-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"c524394c-23d4-4fb4-b41f-0b3151bae4d1\") " pod="openstack/kube-state-metrics-0" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.779585 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/c524394c-23d4-4fb4-b41f-0b3151bae4d1-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"c524394c-23d4-4fb4-b41f-0b3151bae4d1\") " pod="openstack/kube-state-metrics-0" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.779666 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c524394c-23d4-4fb4-b41f-0b3151bae4d1-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"c524394c-23d4-4fb4-b41f-0b3151bae4d1\") " pod="openstack/kube-state-metrics-0" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.780357 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/c524394c-23d4-4fb4-b41f-0b3151bae4d1-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"c524394c-23d4-4fb4-b41f-0b3151bae4d1\") " pod="openstack/kube-state-metrics-0" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.808855 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-974bs\" (UniqueName: \"kubernetes.io/projected/c524394c-23d4-4fb4-b41f-0b3151bae4d1-kube-api-access-974bs\") pod \"kube-state-metrics-0\" (UID: \"c524394c-23d4-4fb4-b41f-0b3151bae4d1\") " pod="openstack/kube-state-metrics-0" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.826608 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58b757ab-b790-4b71-888f-49d52dc5e80d" path="/var/lib/kubelet/pods/58b757ab-b790-4b71-888f-49d52dc5e80d/volumes" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.898606 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 27 20:29:19 crc kubenswrapper[4793]: I0127 20:29:19.991222 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-8fwdb" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.164008 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-scripts\") pod \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\" (UID: \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\") " Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.164092 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-config-data\") pod \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\" (UID: \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\") " Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.164154 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-combined-ca-bundle\") pod \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\" (UID: \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\") " Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.164348 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-csjfr\" (UniqueName: \"kubernetes.io/projected/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-kube-api-access-csjfr\") pod \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\" (UID: \"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5\") " Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.173930 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-kube-api-access-csjfr" (OuterVolumeSpecName: "kube-api-access-csjfr") pod "5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5" (UID: "5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5"). InnerVolumeSpecName "kube-api-access-csjfr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.175260 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-scripts" (OuterVolumeSpecName: "scripts") pod "5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5" (UID: "5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.203431 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5" (UID: "5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.208687 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-config-data" (OuterVolumeSpecName: "config-data") pod "5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5" (UID: "5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.279594 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-csjfr\" (UniqueName: \"kubernetes.io/projected/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-kube-api-access-csjfr\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.279654 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.279671 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.279689 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.387821 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 27 20:29:20 crc kubenswrapper[4793]: W0127 20:29:20.391368 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc524394c_23d4_4fb4_b41f_0b3151bae4d1.slice/crio-07407b8d2b1fa6a7d26dae34adeba0cdf3459b90c361319cfe800f4b284061e9 WatchSource:0}: Error finding container 07407b8d2b1fa6a7d26dae34adeba0cdf3459b90c361319cfe800f4b284061e9: Status 404 returned error can't find the container with id 07407b8d2b1fa6a7d26dae34adeba0cdf3459b90c361319cfe800f4b284061e9 Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.726163 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-8fwdb" event={"ID":"5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5","Type":"ContainerDied","Data":"9961ad76cbb645091a9b7d88438013d1f9f34a2e5d53f313f700660d4f011c0d"} Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.726246 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9961ad76cbb645091a9b7d88438013d1f9f34a2e5d53f313f700660d4f011c0d" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.726362 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-8fwdb" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.740811 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c524394c-23d4-4fb4-b41f-0b3151bae4d1","Type":"ContainerStarted","Data":"07407b8d2b1fa6a7d26dae34adeba0cdf3459b90c361319cfe800f4b284061e9"} Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.749261 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 27 20:29:20 crc kubenswrapper[4793]: E0127 20:29:20.749794 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5" containerName="nova-cell1-conductor-db-sync" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.749807 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5" containerName="nova-cell1-conductor-db-sync" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.750029 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5" containerName="nova-cell1-conductor-db-sync" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.750824 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.753990 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.761782 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.892536 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/226a19ba-8411-43a8-966c-f1ea2d67a5bd-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"226a19ba-8411-43a8-966c-f1ea2d67a5bd\") " pod="openstack/nova-cell1-conductor-0" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.892616 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/226a19ba-8411-43a8-966c-f1ea2d67a5bd-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"226a19ba-8411-43a8-966c-f1ea2d67a5bd\") " pod="openstack/nova-cell1-conductor-0" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.892778 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrghj\" (UniqueName: \"kubernetes.io/projected/226a19ba-8411-43a8-966c-f1ea2d67a5bd-kube-api-access-rrghj\") pod \"nova-cell1-conductor-0\" (UID: \"226a19ba-8411-43a8-966c-f1ea2d67a5bd\") " pod="openstack/nova-cell1-conductor-0" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.995166 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrghj\" (UniqueName: \"kubernetes.io/projected/226a19ba-8411-43a8-966c-f1ea2d67a5bd-kube-api-access-rrghj\") pod \"nova-cell1-conductor-0\" (UID: \"226a19ba-8411-43a8-966c-f1ea2d67a5bd\") " pod="openstack/nova-cell1-conductor-0" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.995264 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/226a19ba-8411-43a8-966c-f1ea2d67a5bd-combined-ca-bundle\") pod 
\"nova-cell1-conductor-0\" (UID: \"226a19ba-8411-43a8-966c-f1ea2d67a5bd\") " pod="openstack/nova-cell1-conductor-0" Jan 27 20:29:20 crc kubenswrapper[4793]: I0127 20:29:20.995293 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/226a19ba-8411-43a8-966c-f1ea2d67a5bd-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"226a19ba-8411-43a8-966c-f1ea2d67a5bd\") " pod="openstack/nova-cell1-conductor-0" Jan 27 20:29:21 crc kubenswrapper[4793]: I0127 20:29:21.000972 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/226a19ba-8411-43a8-966c-f1ea2d67a5bd-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"226a19ba-8411-43a8-966c-f1ea2d67a5bd\") " pod="openstack/nova-cell1-conductor-0" Jan 27 20:29:21 crc kubenswrapper[4793]: I0127 20:29:21.001173 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/226a19ba-8411-43a8-966c-f1ea2d67a5bd-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"226a19ba-8411-43a8-966c-f1ea2d67a5bd\") " pod="openstack/nova-cell1-conductor-0" Jan 27 20:29:21 crc kubenswrapper[4793]: I0127 20:29:21.013992 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrghj\" (UniqueName: \"kubernetes.io/projected/226a19ba-8411-43a8-966c-f1ea2d67a5bd-kube-api-access-rrghj\") pod \"nova-cell1-conductor-0\" (UID: \"226a19ba-8411-43a8-966c-f1ea2d67a5bd\") " pod="openstack/nova-cell1-conductor-0" Jan 27 20:29:21 crc kubenswrapper[4793]: I0127 20:29:21.133501 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 27 20:29:21 crc kubenswrapper[4793]: I0127 20:29:21.404043 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:29:21 crc kubenswrapper[4793]: I0127 20:29:21.404633 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerName="ceilometer-central-agent" containerID="cri-o://9ac5478a5703cf5ff4569c49173a34ef2bd37c7ef314ddfd7363f52505a32a1c" gracePeriod=30 Jan 27 20:29:21 crc kubenswrapper[4793]: I0127 20:29:21.404771 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerName="proxy-httpd" containerID="cri-o://0663d764a1987e35bd6bbf91dac60d52a8ea6643e717ad0870072caf19f7120e" gracePeriod=30 Jan 27 20:29:21 crc kubenswrapper[4793]: I0127 20:29:21.404818 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerName="sg-core" containerID="cri-o://3cbc7cc19d907d4e8044680fab29dde33669f5fd7028882b07a3bb9a9c3a20b6" gracePeriod=30 Jan 27 20:29:21 crc kubenswrapper[4793]: I0127 20:29:21.404903 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerName="ceilometer-notification-agent" containerID="cri-o://3b16586b8ff87bad0d9b303c454f756e7c4a0a50bb292228259a80955ffe39a3" gracePeriod=30 Jan 27 20:29:21 crc kubenswrapper[4793]: I0127 20:29:21.708508 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 27 20:29:21 crc kubenswrapper[4793]: I0127 
20:29:21.836820 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 27 20:29:21 crc kubenswrapper[4793]: I0127 20:29:21.962528 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 27 20:29:21 crc kubenswrapper[4793]: I0127 20:29:21.981950 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c524394c-23d4-4fb4-b41f-0b3151bae4d1","Type":"ContainerStarted","Data":"d87d6fb6053f080e6d701b122751d4188e1e50cb1e2a7407714de39b9a6aa0eb"} Jan 27 20:29:21 crc kubenswrapper[4793]: I0127 20:29:21.983710 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 27 20:29:22 crc kubenswrapper[4793]: I0127 20:29:22.011910 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.42192414 podStartE2EDuration="3.011888701s" podCreationTimestamp="2026-01-27 20:29:19 +0000 UTC" firstStartedPulling="2026-01-27 20:29:20.394136863 +0000 UTC m=+1585.784390019" lastFinishedPulling="2026-01-27 20:29:20.984101424 +0000 UTC m=+1586.374354580" observedRunningTime="2026-01-27 20:29:22.007164296 +0000 UTC m=+1587.397417452" watchObservedRunningTime="2026-01-27 20:29:22.011888701 +0000 UTC m=+1587.402141857" Jan 27 20:29:22 crc kubenswrapper[4793]: I0127 20:29:22.039395 4793 generic.go:334] "Generic (PLEG): container finished" podID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerID="0663d764a1987e35bd6bbf91dac60d52a8ea6643e717ad0870072caf19f7120e" exitCode=0 Jan 27 20:29:22 crc kubenswrapper[4793]: I0127 20:29:22.039440 4793 generic.go:334] "Generic (PLEG): container finished" podID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerID="3cbc7cc19d907d4e8044680fab29dde33669f5fd7028882b07a3bb9a9c3a20b6" exitCode=2 Jan 27 20:29:22 crc kubenswrapper[4793]: I0127 20:29:22.039515 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"22ca534e-e36b-4d60-845b-ba1752c5c99c","Type":"ContainerDied","Data":"0663d764a1987e35bd6bbf91dac60d52a8ea6643e717ad0870072caf19f7120e"} Jan 27 20:29:22 crc kubenswrapper[4793]: I0127 20:29:22.039550 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"22ca534e-e36b-4d60-845b-ba1752c5c99c","Type":"ContainerDied","Data":"3cbc7cc19d907d4e8044680fab29dde33669f5fd7028882b07a3bb9a9c3a20b6"} Jan 27 20:29:22 crc kubenswrapper[4793]: I0127 20:29:22.063469 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"226a19ba-8411-43a8-966c-f1ea2d67a5bd","Type":"ContainerStarted","Data":"cc9d422ca3f800795ad9e9552d9389bd94d1de8f5d136777ea4ec4aac0c722fe"} Jan 27 20:29:22 crc kubenswrapper[4793]: I0127 20:29:22.750839 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="e6ea88dd-da8a-4657-8785-79b5d30c14e9" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.210:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:29:22 crc kubenswrapper[4793]: I0127 20:29:22.866891 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 27 20:29:22 crc kubenswrapper[4793]: I0127 20:29:22.866953 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 27 20:29:22 crc kubenswrapper[4793]: I0127 
20:29:22.877030 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="e6ea88dd-da8a-4657-8785-79b5d30c14e9" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.210:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 20:29:23 crc kubenswrapper[4793]: I0127 20:29:23.075962 4793 generic.go:334] "Generic (PLEG): container finished" podID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerID="9ac5478a5703cf5ff4569c49173a34ef2bd37c7ef314ddfd7363f52505a32a1c" exitCode=0 Jan 27 20:29:23 crc kubenswrapper[4793]: I0127 20:29:23.076049 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"22ca534e-e36b-4d60-845b-ba1752c5c99c","Type":"ContainerDied","Data":"9ac5478a5703cf5ff4569c49173a34ef2bd37c7ef314ddfd7363f52505a32a1c"} Jan 27 20:29:23 crc kubenswrapper[4793]: I0127 20:29:23.079824 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"226a19ba-8411-43a8-966c-f1ea2d67a5bd","Type":"ContainerStarted","Data":"98c4f43799218455d60bb1a2d0a26848d2b19fecd9fc564b602880c0ebe16aea"} Jan 27 20:29:23 crc kubenswrapper[4793]: I0127 20:29:23.079904 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 27 20:29:23 crc kubenswrapper[4793]: I0127 20:29:23.113814 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=3.113783808 podStartE2EDuration="3.113783808s" podCreationTimestamp="2026-01-27 20:29:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:29:23.113402558 +0000 UTC m=+1588.503655724" watchObservedRunningTime="2026-01-27 20:29:23.113783808 +0000 UTC m=+1588.504036964" Jan 27 20:29:23 crc kubenswrapper[4793]: I0127 20:29:23.977917 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d75e0794-645e-437f-8553-777f0883e298" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.211:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:29:24 crc kubenswrapper[4793]: I0127 20:29:24.025477 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d75e0794-645e-437f-8553-777f0883e298" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.211:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:29:24 crc kubenswrapper[4793]: I0127 20:29:24.786572 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 27 20:29:24 crc kubenswrapper[4793]: I0127 20:29:24.832403 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.022222 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.101817 4793 generic.go:334] "Generic (PLEG): container finished" podID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerID="3b16586b8ff87bad0d9b303c454f756e7c4a0a50bb292228259a80955ffe39a3" exitCode=0 Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.101875 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"22ca534e-e36b-4d60-845b-ba1752c5c99c","Type":"ContainerDied","Data":"3b16586b8ff87bad0d9b303c454f756e7c4a0a50bb292228259a80955ffe39a3"} Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.101907 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.101933 4793 scope.go:117] "RemoveContainer" containerID="0663d764a1987e35bd6bbf91dac60d52a8ea6643e717ad0870072caf19f7120e" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.101920 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"22ca534e-e36b-4d60-845b-ba1752c5c99c","Type":"ContainerDied","Data":"edb073055de088e86ff592c8637a226e873eed9a783774eeb3ef5f92ef50e319"} Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.136774 4793 scope.go:117] "RemoveContainer" containerID="3cbc7cc19d907d4e8044680fab29dde33669f5fd7028882b07a3bb9a9c3a20b6" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.139796 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.170750 4793 scope.go:117] "RemoveContainer" containerID="3b16586b8ff87bad0d9b303c454f756e7c4a0a50bb292228259a80955ffe39a3" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.189055 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-config-data\") pod \"22ca534e-e36b-4d60-845b-ba1752c5c99c\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.189187 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/22ca534e-e36b-4d60-845b-ba1752c5c99c-run-httpd\") pod \"22ca534e-e36b-4d60-845b-ba1752c5c99c\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.189418 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrb7z\" (UniqueName: \"kubernetes.io/projected/22ca534e-e36b-4d60-845b-ba1752c5c99c-kube-api-access-xrb7z\") pod \"22ca534e-e36b-4d60-845b-ba1752c5c99c\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.189522 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-combined-ca-bundle\") pod \"22ca534e-e36b-4d60-845b-ba1752c5c99c\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.189633 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-sg-core-conf-yaml\") pod \"22ca534e-e36b-4d60-845b-ba1752c5c99c\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") 
" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.189761 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-scripts\") pod \"22ca534e-e36b-4d60-845b-ba1752c5c99c\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.189793 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/22ca534e-e36b-4d60-845b-ba1752c5c99c-log-httpd\") pod \"22ca534e-e36b-4d60-845b-ba1752c5c99c\" (UID: \"22ca534e-e36b-4d60-845b-ba1752c5c99c\") " Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.190087 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/22ca534e-e36b-4d60-845b-ba1752c5c99c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "22ca534e-e36b-4d60-845b-ba1752c5c99c" (UID: "22ca534e-e36b-4d60-845b-ba1752c5c99c"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.190591 4793 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/22ca534e-e36b-4d60-845b-ba1752c5c99c-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.190938 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/22ca534e-e36b-4d60-845b-ba1752c5c99c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "22ca534e-e36b-4d60-845b-ba1752c5c99c" (UID: "22ca534e-e36b-4d60-845b-ba1752c5c99c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.214040 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22ca534e-e36b-4d60-845b-ba1752c5c99c-kube-api-access-xrb7z" (OuterVolumeSpecName: "kube-api-access-xrb7z") pod "22ca534e-e36b-4d60-845b-ba1752c5c99c" (UID: "22ca534e-e36b-4d60-845b-ba1752c5c99c"). InnerVolumeSpecName "kube-api-access-xrb7z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.215715 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-scripts" (OuterVolumeSpecName: "scripts") pod "22ca534e-e36b-4d60-845b-ba1752c5c99c" (UID: "22ca534e-e36b-4d60-845b-ba1752c5c99c"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.225638 4793 scope.go:117] "RemoveContainer" containerID="9ac5478a5703cf5ff4569c49173a34ef2bd37c7ef314ddfd7363f52505a32a1c" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.293273 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrb7z\" (UniqueName: \"kubernetes.io/projected/22ca534e-e36b-4d60-845b-ba1752c5c99c-kube-api-access-xrb7z\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.293311 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.293321 4793 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/22ca534e-e36b-4d60-845b-ba1752c5c99c-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.301847 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "22ca534e-e36b-4d60-845b-ba1752c5c99c" (UID: "22ca534e-e36b-4d60-845b-ba1752c5c99c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.311201 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "22ca534e-e36b-4d60-845b-ba1752c5c99c" (UID: "22ca534e-e36b-4d60-845b-ba1752c5c99c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.363288 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-config-data" (OuterVolumeSpecName: "config-data") pod "22ca534e-e36b-4d60-845b-ba1752c5c99c" (UID: "22ca534e-e36b-4d60-845b-ba1752c5c99c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.395721 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.395762 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.395777 4793 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/22ca534e-e36b-4d60-845b-ba1752c5c99c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.925389 4793 scope.go:117] "RemoveContainer" containerID="0663d764a1987e35bd6bbf91dac60d52a8ea6643e717ad0870072caf19f7120e" Jan 27 20:29:25 crc kubenswrapper[4793]: E0127 20:29:25.929946 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0663d764a1987e35bd6bbf91dac60d52a8ea6643e717ad0870072caf19f7120e\": container with ID starting with 0663d764a1987e35bd6bbf91dac60d52a8ea6643e717ad0870072caf19f7120e not found: ID does not exist" containerID="0663d764a1987e35bd6bbf91dac60d52a8ea6643e717ad0870072caf19f7120e" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.932753 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0663d764a1987e35bd6bbf91dac60d52a8ea6643e717ad0870072caf19f7120e"} err="failed to get container status \"0663d764a1987e35bd6bbf91dac60d52a8ea6643e717ad0870072caf19f7120e\": rpc error: code = NotFound desc = could not find container \"0663d764a1987e35bd6bbf91dac60d52a8ea6643e717ad0870072caf19f7120e\": container with ID starting with 0663d764a1987e35bd6bbf91dac60d52a8ea6643e717ad0870072caf19f7120e not found: ID does not exist" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.932803 4793 scope.go:117] "RemoveContainer" containerID="3cbc7cc19d907d4e8044680fab29dde33669f5fd7028882b07a3bb9a9c3a20b6" Jan 27 20:29:25 crc kubenswrapper[4793]: E0127 20:29:25.934611 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cbc7cc19d907d4e8044680fab29dde33669f5fd7028882b07a3bb9a9c3a20b6\": container with ID starting with 3cbc7cc19d907d4e8044680fab29dde33669f5fd7028882b07a3bb9a9c3a20b6 not found: ID does not exist" containerID="3cbc7cc19d907d4e8044680fab29dde33669f5fd7028882b07a3bb9a9c3a20b6" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.934674 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cbc7cc19d907d4e8044680fab29dde33669f5fd7028882b07a3bb9a9c3a20b6"} err="failed to get container status \"3cbc7cc19d907d4e8044680fab29dde33669f5fd7028882b07a3bb9a9c3a20b6\": rpc error: code = NotFound desc = could not find container \"3cbc7cc19d907d4e8044680fab29dde33669f5fd7028882b07a3bb9a9c3a20b6\": container with ID starting with 3cbc7cc19d907d4e8044680fab29dde33669f5fd7028882b07a3bb9a9c3a20b6 not found: ID does not exist" Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.934694 4793 scope.go:117] "RemoveContainer" containerID="3b16586b8ff87bad0d9b303c454f756e7c4a0a50bb292228259a80955ffe39a3" Jan 27 20:29:25 crc kubenswrapper[4793]: 
Jan 27 20:29:25 crc kubenswrapper[4793]: E0127 20:29:25.940319 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b16586b8ff87bad0d9b303c454f756e7c4a0a50bb292228259a80955ffe39a3\": container with ID starting with 3b16586b8ff87bad0d9b303c454f756e7c4a0a50bb292228259a80955ffe39a3 not found: ID does not exist" containerID="3b16586b8ff87bad0d9b303c454f756e7c4a0a50bb292228259a80955ffe39a3"
Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.940364 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b16586b8ff87bad0d9b303c454f756e7c4a0a50bb292228259a80955ffe39a3"} err="failed to get container status \"3b16586b8ff87bad0d9b303c454f756e7c4a0a50bb292228259a80955ffe39a3\": rpc error: code = NotFound desc = could not find container \"3b16586b8ff87bad0d9b303c454f756e7c4a0a50bb292228259a80955ffe39a3\": container with ID starting with 3b16586b8ff87bad0d9b303c454f756e7c4a0a50bb292228259a80955ffe39a3 not found: ID does not exist"
Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.940413 4793 scope.go:117] "RemoveContainer" containerID="9ac5478a5703cf5ff4569c49173a34ef2bd37c7ef314ddfd7363f52505a32a1c"
Jan 27 20:29:25 crc kubenswrapper[4793]: E0127 20:29:25.940740 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ac5478a5703cf5ff4569c49173a34ef2bd37c7ef314ddfd7363f52505a32a1c\": container with ID starting with 9ac5478a5703cf5ff4569c49173a34ef2bd37c7ef314ddfd7363f52505a32a1c not found: ID does not exist" containerID="9ac5478a5703cf5ff4569c49173a34ef2bd37c7ef314ddfd7363f52505a32a1c"
Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.940762 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ac5478a5703cf5ff4569c49173a34ef2bd37c7ef314ddfd7363f52505a32a1c"} err="failed to get container status \"9ac5478a5703cf5ff4569c49173a34ef2bd37c7ef314ddfd7363f52505a32a1c\": rpc error: code = NotFound desc = could not find container \"9ac5478a5703cf5ff4569c49173a34ef2bd37c7ef314ddfd7363f52505a32a1c\": container with ID starting with 9ac5478a5703cf5ff4569c49173a34ef2bd37c7ef314ddfd7363f52505a32a1c not found: ID does not exist"
Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.964952 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.982724 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.996251 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 27 20:29:25 crc kubenswrapper[4793]: E0127 20:29:25.997028 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerName="ceilometer-central-agent"
Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.997052 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerName="ceilometer-central-agent"
Jan 27 20:29:25 crc kubenswrapper[4793]: E0127 20:29:25.997076 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerName="proxy-httpd"
Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.997085 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerName="proxy-httpd"
Jan 27 20:29:25 crc kubenswrapper[4793]: E0127 20:29:25.997098 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerName="ceilometer-notification-agent"
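The E0127/I0127 pairs above repeat one benign pattern: the kubelet asks CRI-O for the status of containers it has already removed, and the runtime answers with gRPC code NotFound, which a deletion path can treat as success since the goal state (container gone) already holds. A minimal sketch of that idempotent-delete convention, assuming a gRPC-backed CRI client; removeContainer below is a hypothetical stand-in, not the kubelet's own function:

    package main

    import (
    	"fmt"

    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // removeContainer is a hypothetical stand-in for a CRI RemoveContainer RPC;
    // here it always reports the container as already gone.
    func removeContainer(id string) error {
    	return status.Error(codes.NotFound, "could not find container "+id)
    }

    // removeIfPresent treats gRPC NotFound as success: the container is gone,
    // which is exactly the state the caller wanted.
    func removeIfPresent(id string) error {
    	if err := removeContainer(id); err != nil && status.Code(err) != codes.NotFound {
    		return err
    	}
    	return nil
    }

    func main() {
    	if err := removeIfPresent("9ac5478a"); err != nil {
    		fmt.Println("remove failed:", err)
    		return
    	}
    	fmt.Println("container gone (already deleted or deleted now)")
    }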
Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.997106 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerName="ceilometer-notification-agent"
Jan 27 20:29:25 crc kubenswrapper[4793]: E0127 20:29:25.997120 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerName="sg-core"
Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.997129 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerName="sg-core"
Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.997386 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerName="ceilometer-notification-agent"
Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.997401 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerName="proxy-httpd"
Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.997414 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerName="ceilometer-central-agent"
Jan 27 20:29:25 crc kubenswrapper[4793]: I0127 20:29:25.997617 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" containerName="sg-core"
Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.000435 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.006829 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.007062 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.008758 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.011464 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.172010 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0"
Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.172083 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0"
Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.172126 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-log-httpd\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0"
Jan 27 20:29:26 crc
kubenswrapper[4793]: I0127 20:29:26.172250 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnwlx\" (UniqueName: \"kubernetes.io/projected/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-kube-api-access-qnwlx\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.172583 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-scripts\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.172635 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-run-httpd\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.173176 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-config-data\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.173239 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.275277 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-config-data\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.275371 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.275408 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.275441 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.275474 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-log-httpd\") pod \"ceilometer-0\" (UID: 
\"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.275509 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnwlx\" (UniqueName: \"kubernetes.io/projected/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-kube-api-access-qnwlx\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.275619 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-scripts\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.275654 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-run-httpd\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.276105 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-log-httpd\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.282916 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-run-httpd\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.288950 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.292454 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-scripts\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.293219 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.305251 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.309575 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnwlx\" (UniqueName: \"kubernetes.io/projected/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-kube-api-access-qnwlx\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " 
pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.318220 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-config-data\") pod \"ceilometer-0\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.326097 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:29:26 crc kubenswrapper[4793]: I0127 20:29:26.805099 4793 scope.go:117] "RemoveContainer" containerID="97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e" Jan 27 20:29:26 crc kubenswrapper[4793]: E0127 20:29:26.805546 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:29:27 crc kubenswrapper[4793]: I0127 20:29:27.255082 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:29:27 crc kubenswrapper[4793]: I0127 20:29:27.859919 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22ca534e-e36b-4d60-845b-ba1752c5c99c" path="/var/lib/kubelet/pods/22ca534e-e36b-4d60-845b-ba1752c5c99c/volumes" Jan 27 20:29:28 crc kubenswrapper[4793]: I0127 20:29:28.148738 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b","Type":"ContainerStarted","Data":"d880821c4e597a5d30fe8e6f5f1ce7c4bbb61503a944fd2e3258f2f1142b5601"} Jan 27 20:29:28 crc kubenswrapper[4793]: I0127 20:29:28.149039 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b","Type":"ContainerStarted","Data":"8164baee59fbe2769c8955f5416ac98bee19172703236eadfa31488de5e2237d"} Jan 27 20:29:29 crc kubenswrapper[4793]: I0127 20:29:29.160873 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b","Type":"ContainerStarted","Data":"25033292beadb5903cdd962ebc50659773864d7841e20774ad7dbca8ab60e5d6"} Jan 27 20:29:29 crc kubenswrapper[4793]: I0127 20:29:29.161183 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b","Type":"ContainerStarted","Data":"a6aaae42a45feb5b51ac383de104d4a87ec7cc29c62ae1b26032c372f34ab3e2"} Jan 27 20:29:29 crc kubenswrapper[4793]: I0127 20:29:29.935635 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.166050 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.186652 4793 generic.go:334] "Generic (PLEG): container finished" podID="5a24c236-232f-4bd6-8d89-019058789317" containerID="756ef12bab39ddd06e8c9afefa8cc304473802c6b77c88cffc79bbf743d899fa" exitCode=137 Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.186868 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" 
event={"ID":"5a24c236-232f-4bd6-8d89-019058789317","Type":"ContainerDied","Data":"756ef12bab39ddd06e8c9afefa8cc304473802c6b77c88cffc79bbf743d899fa"} Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.186999 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5a24c236-232f-4bd6-8d89-019058789317","Type":"ContainerDied","Data":"3512ee444b26108b529144ee1db20fde52eb0126c61a6863ca941726d91ce026"} Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.187017 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3512ee444b26108b529144ee1db20fde52eb0126c61a6863ca941726d91ce026" Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.196773 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b","Type":"ContainerStarted","Data":"0b23506b449d7d7ec78660c14e7a1a34632c24d8a8f1c978dc27855dee4a7283"} Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.197814 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.229581 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.182593353 podStartE2EDuration="6.229535617s" podCreationTimestamp="2026-01-27 20:29:25 +0000 UTC" firstStartedPulling="2026-01-27 20:29:27.249639185 +0000 UTC m=+1592.639892341" lastFinishedPulling="2026-01-27 20:29:30.296581449 +0000 UTC m=+1595.686834605" observedRunningTime="2026-01-27 20:29:31.223980639 +0000 UTC m=+1596.614233795" watchObservedRunningTime="2026-01-27 20:29:31.229535617 +0000 UTC m=+1596.619788773" Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.246913 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.358582 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a24c236-232f-4bd6-8d89-019058789317-combined-ca-bundle\") pod \"5a24c236-232f-4bd6-8d89-019058789317\" (UID: \"5a24c236-232f-4bd6-8d89-019058789317\") " Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.358679 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a24c236-232f-4bd6-8d89-019058789317-config-data\") pod \"5a24c236-232f-4bd6-8d89-019058789317\" (UID: \"5a24c236-232f-4bd6-8d89-019058789317\") " Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.358702 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6b9j\" (UniqueName: \"kubernetes.io/projected/5a24c236-232f-4bd6-8d89-019058789317-kube-api-access-d6b9j\") pod \"5a24c236-232f-4bd6-8d89-019058789317\" (UID: \"5a24c236-232f-4bd6-8d89-019058789317\") " Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.378275 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a24c236-232f-4bd6-8d89-019058789317-kube-api-access-d6b9j" (OuterVolumeSpecName: "kube-api-access-d6b9j") pod "5a24c236-232f-4bd6-8d89-019058789317" (UID: "5a24c236-232f-4bd6-8d89-019058789317"). InnerVolumeSpecName "kube-api-access-d6b9j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.394496 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a24c236-232f-4bd6-8d89-019058789317-config-data" (OuterVolumeSpecName: "config-data") pod "5a24c236-232f-4bd6-8d89-019058789317" (UID: "5a24c236-232f-4bd6-8d89-019058789317"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.403795 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a24c236-232f-4bd6-8d89-019058789317-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5a24c236-232f-4bd6-8d89-019058789317" (UID: "5a24c236-232f-4bd6-8d89-019058789317"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.461094 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6b9j\" (UniqueName: \"kubernetes.io/projected/5a24c236-232f-4bd6-8d89-019058789317-kube-api-access-d6b9j\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.461127 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a24c236-232f-4bd6-8d89-019058789317-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.461137 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a24c236-232f-4bd6-8d89-019058789317-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.689257 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.689779 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.694481 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 27 20:29:31 crc kubenswrapper[4793]: I0127 20:29:31.698009 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.204963 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.231839 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.259877 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.272451 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 20:29:32 crc kubenswrapper[4793]: E0127 20:29:32.273105 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a24c236-232f-4bd6-8d89-019058789317" containerName="nova-cell1-novncproxy-novncproxy" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.273136 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a24c236-232f-4bd6-8d89-019058789317" containerName="nova-cell1-novncproxy-novncproxy" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.273412 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a24c236-232f-4bd6-8d89-019058789317" containerName="nova-cell1-novncproxy-novncproxy" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.274371 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.278678 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.278714 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.278955 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.284403 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.434637 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/54fdd2d8-8d6e-41f5-9a60-e7367b399aa8-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.434724 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54fdd2d8-8d6e-41f5-9a60-e7367b399aa8-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.434919 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/54fdd2d8-8d6e-41f5-9a60-e7367b399aa8-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.435278 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/54fdd2d8-8d6e-41f5-9a60-e7367b399aa8-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.435339 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7mp8\" (UniqueName: \"kubernetes.io/projected/54fdd2d8-8d6e-41f5-9a60-e7367b399aa8-kube-api-access-z7mp8\") pod \"nova-cell1-novncproxy-0\" (UID: \"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.537187 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54fdd2d8-8d6e-41f5-9a60-e7367b399aa8-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.537239 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7mp8\" (UniqueName: \"kubernetes.io/projected/54fdd2d8-8d6e-41f5-9a60-e7367b399aa8-kube-api-access-z7mp8\") pod \"nova-cell1-novncproxy-0\" (UID: \"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.537297 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/54fdd2d8-8d6e-41f5-9a60-e7367b399aa8-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.537325 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54fdd2d8-8d6e-41f5-9a60-e7367b399aa8-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.537357 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/54fdd2d8-8d6e-41f5-9a60-e7367b399aa8-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.542156 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54fdd2d8-8d6e-41f5-9a60-e7367b399aa8-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.543194 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/54fdd2d8-8d6e-41f5-9a60-e7367b399aa8-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.544540 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54fdd2d8-8d6e-41f5-9a60-e7367b399aa8-combined-ca-bundle\") pod 
\"nova-cell1-novncproxy-0\" (UID: \"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.545037 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/54fdd2d8-8d6e-41f5-9a60-e7367b399aa8-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.565172 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7mp8\" (UniqueName: \"kubernetes.io/projected/54fdd2d8-8d6e-41f5-9a60-e7367b399aa8-kube-api-access-z7mp8\") pod \"nova-cell1-novncproxy-0\" (UID: \"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:32 crc kubenswrapper[4793]: I0127 20:29:32.600800 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:33 crc kubenswrapper[4793]: I0127 20:29:33.288719 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 27 20:29:33 crc kubenswrapper[4793]: I0127 20:29:33.290969 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 27 20:29:33 crc kubenswrapper[4793]: I0127 20:29:33.317842 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 27 20:29:33 crc kubenswrapper[4793]: I0127 20:29:33.354094 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 27 20:29:33 crc kubenswrapper[4793]: I0127 20:29:33.641128 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 20:29:33 crc kubenswrapper[4793]: I0127 20:29:33.815209 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a24c236-232f-4bd6-8d89-019058789317" path="/var/lib/kubelet/pods/5a24c236-232f-4bd6-8d89-019058789317/volumes" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.260194 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8","Type":"ContainerStarted","Data":"95f28be36f3aa0521095e9afa6fe8d0ed7c782c8f946f446aeadf4edd9bcc60b"} Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.260524 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"54fdd2d8-8d6e-41f5-9a60-e7367b399aa8","Type":"ContainerStarted","Data":"a856d215fee7d8889b547ebb878d6407dbce06eb97ec54282f500509eb591fa0"} Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.260575 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.271555 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.285889 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.28586557 podStartE2EDuration="2.28586557s" podCreationTimestamp="2026-01-27 20:29:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:29:34.275524315 +0000 UTC m=+1599.665777491" 
watchObservedRunningTime="2026-01-27 20:29:34.28586557 +0000 UTC m=+1599.676118726" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.807779 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-646848474f-brx6x"] Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.811223 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.818277 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-646848474f-brx6x"] Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.858304 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-dns-swift-storage-0\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.858437 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvvr2\" (UniqueName: \"kubernetes.io/projected/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-kube-api-access-kvvr2\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.858460 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-ovsdbserver-sb\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.858490 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-ovsdbserver-nb\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.858579 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-config\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.858632 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-dns-svc\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.960255 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-dns-svc\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.960373 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-dns-swift-storage-0\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.960440 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvvr2\" (UniqueName: \"kubernetes.io/projected/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-kube-api-access-kvvr2\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.960459 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-ovsdbserver-sb\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.960484 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-ovsdbserver-nb\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.960534 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-config\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.961284 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-dns-svc\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.964176 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-config\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.964268 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-ovsdbserver-sb\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.964961 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-ovsdbserver-nb\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.965316 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-dns-swift-storage-0\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:34 crc kubenswrapper[4793]: I0127 20:29:34.981350 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvvr2\" (UniqueName: \"kubernetes.io/projected/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-kube-api-access-kvvr2\") pod \"dnsmasq-dns-646848474f-brx6x\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:35 crc kubenswrapper[4793]: I0127 20:29:35.138047 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:36 crc kubenswrapper[4793]: I0127 20:29:36.361229 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-646848474f-brx6x"] Jan 27 20:29:37 crc kubenswrapper[4793]: I0127 20:29:37.328428 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-646848474f-brx6x" event={"ID":"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04","Type":"ContainerStarted","Data":"9574fc337eb21abe8fdaaf47ce3bee1cce78d013821415913dec93539f1d6231"} Jan 27 20:29:37 crc kubenswrapper[4793]: I0127 20:29:37.328749 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-646848474f-brx6x" event={"ID":"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04","Type":"ContainerStarted","Data":"d3a4a232b108a75c5f61e26466c0bee0ee67aebd372083d26381d4998286e7f8"} Jan 27 20:29:38 crc kubenswrapper[4793]: I0127 20:29:38.260090 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 27 20:29:38 crc kubenswrapper[4793]: I0127 20:29:38.543048 4793 generic.go:334] "Generic (PLEG): container finished" podID="2335f058-4ecb-4a36-9a9e-7cfa8d90ac04" containerID="9574fc337eb21abe8fdaaf47ce3bee1cce78d013821415913dec93539f1d6231" exitCode=0 Jan 27 20:29:38 crc kubenswrapper[4793]: I0127 20:29:38.543814 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-646848474f-brx6x" event={"ID":"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04","Type":"ContainerDied","Data":"9574fc337eb21abe8fdaaf47ce3bee1cce78d013821415913dec93539f1d6231"} Jan 27 20:29:39 crc kubenswrapper[4793]: I0127 20:29:39.554453 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-646848474f-brx6x" event={"ID":"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04","Type":"ContainerStarted","Data":"bc4d58d02b7e4028c571fce5fc335a9ab5c2c8570fcc2481487c27389ae4f848"} Jan 27 20:29:39 crc kubenswrapper[4793]: I0127 20:29:39.554837 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:39 crc kubenswrapper[4793]: I0127 20:29:39.587323 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-646848474f-brx6x" podStartSLOduration=5.587300347 podStartE2EDuration="5.587300347s" podCreationTimestamp="2026-01-27 20:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:29:39.581651696 +0000 UTC m=+1604.971904852" watchObservedRunningTime="2026-01-27 20:29:39.587300347 +0000 UTC m=+1604.977553503" Jan 27 20:29:40 crc kubenswrapper[4793]: I0127 20:29:40.498095 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 27 20:29:40 
crc kubenswrapper[4793]: I0127 20:29:40.498710 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d75e0794-645e-437f-8553-777f0883e298" containerName="nova-api-log" containerID="cri-o://acce7e97a6c48b77432af0aa29efe1dcf05c2c403f7cbda454626bce370851b8" gracePeriod=30
Jan 27 20:29:40 crc kubenswrapper[4793]: I0127 20:29:40.498818 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d75e0794-645e-437f-8553-777f0883e298" containerName="nova-api-api" containerID="cri-o://a591e0b22de91d59fd5aac436215d200adfc319ccd040e366e61500c741ac2cc" gracePeriod=30
Jan 27 20:29:41 crc kubenswrapper[4793]: I0127 20:29:41.030892 4793 scope.go:117] "RemoveContainer" containerID="97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e"
Jan 27 20:29:41 crc kubenswrapper[4793]: E0127 20:29:41.035127 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:29:41 crc kubenswrapper[4793]: I0127 20:29:41.576690 4793 generic.go:334] "Generic (PLEG): container finished" podID="d75e0794-645e-437f-8553-777f0883e298" containerID="acce7e97a6c48b77432af0aa29efe1dcf05c2c403f7cbda454626bce370851b8" exitCode=143
Jan 27 20:29:41 crc kubenswrapper[4793]: I0127 20:29:41.576778 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d75e0794-645e-437f-8553-777f0883e298","Type":"ContainerDied","Data":"acce7e97a6c48b77432af0aa29efe1dcf05c2c403f7cbda454626bce370851b8"}
Jan 27 20:29:42 crc kubenswrapper[4793]: I0127 20:29:42.602464 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Jan 27 20:29:42 crc kubenswrapper[4793]: I0127 20:29:42.629756 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.617422 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.686993 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-api-0" podUID="d75e0794-645e-437f-8553-777f0883e298" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.211:8774/\": read tcp 10.217.0.2:58734->10.217.0.211:8774: read: connection reset by peer"
Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.687892 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-api-0" podUID="d75e0794-645e-437f-8553-777f0883e298" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.211:8774/\": read tcp 10.217.0.2:58744->10.217.0.211:8774: read: connection reset by peer"
Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.828898 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-pn9t9"]
Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.831300 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-pn9t9"
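The pod_workers entry above shows watcher-applier-0 held in CrashLoopBackOff at "back-off 1m20s". Assuming the kubelet's default restart back-off (10s initial delay, doubled after each failed restart, capped at 5m), 1m20s is the fourth step of the schedule, consistent with the earlier "back-off 1m20s" refusal at 20:29:26 in this same log. A minimal sketch of that schedule:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// Assumed kubelet defaults: 10s base delay, doubling, 5m ceiling.
    	delay, maxDelay := 10*time.Second, 5*time.Minute
    	for restarts := 1; restarts <= 6; restarts++ {
    		fmt.Printf("after failed restart %d: back-off %v\n", restarts, delay)
    		delay *= 2
    		if delay > maxDelay {
    			delay = maxDelay
    		}
    	}
    }

Output: 10s, 20s, 40s, 1m20s, 2m40s, 5m0s; every later restart then waits the 5m cap until the container stays up long enough for the counter to reset.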
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-pn9t9" Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.838199 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.839054 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.844140 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-pn9t9"] Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.864262 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsz6c\" (UniqueName: \"kubernetes.io/projected/584f9fed-a572-4b61-8b51-f1178a6cfa76-kube-api-access-wsz6c\") pod \"nova-cell1-cell-mapping-pn9t9\" (UID: \"584f9fed-a572-4b61-8b51-f1178a6cfa76\") " pod="openstack/nova-cell1-cell-mapping-pn9t9" Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.865426 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-pn9t9\" (UID: \"584f9fed-a572-4b61-8b51-f1178a6cfa76\") " pod="openstack/nova-cell1-cell-mapping-pn9t9" Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.866071 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-config-data\") pod \"nova-cell1-cell-mapping-pn9t9\" (UID: \"584f9fed-a572-4b61-8b51-f1178a6cfa76\") " pod="openstack/nova-cell1-cell-mapping-pn9t9" Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.866110 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-scripts\") pod \"nova-cell1-cell-mapping-pn9t9\" (UID: \"584f9fed-a572-4b61-8b51-f1178a6cfa76\") " pod="openstack/nova-cell1-cell-mapping-pn9t9" Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.967131 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-config-data\") pod \"nova-cell1-cell-mapping-pn9t9\" (UID: \"584f9fed-a572-4b61-8b51-f1178a6cfa76\") " pod="openstack/nova-cell1-cell-mapping-pn9t9" Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.967183 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-scripts\") pod \"nova-cell1-cell-mapping-pn9t9\" (UID: \"584f9fed-a572-4b61-8b51-f1178a6cfa76\") " pod="openstack/nova-cell1-cell-mapping-pn9t9" Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.967243 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsz6c\" (UniqueName: \"kubernetes.io/projected/584f9fed-a572-4b61-8b51-f1178a6cfa76-kube-api-access-wsz6c\") pod \"nova-cell1-cell-mapping-pn9t9\" (UID: \"584f9fed-a572-4b61-8b51-f1178a6cfa76\") " pod="openstack/nova-cell1-cell-mapping-pn9t9" Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.967307 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-pn9t9\" (UID: \"584f9fed-a572-4b61-8b51-f1178a6cfa76\") " pod="openstack/nova-cell1-cell-mapping-pn9t9" Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.973836 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-pn9t9\" (UID: \"584f9fed-a572-4b61-8b51-f1178a6cfa76\") " pod="openstack/nova-cell1-cell-mapping-pn9t9" Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.974681 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-scripts\") pod \"nova-cell1-cell-mapping-pn9t9\" (UID: \"584f9fed-a572-4b61-8b51-f1178a6cfa76\") " pod="openstack/nova-cell1-cell-mapping-pn9t9" Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.977424 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-config-data\") pod \"nova-cell1-cell-mapping-pn9t9\" (UID: \"584f9fed-a572-4b61-8b51-f1178a6cfa76\") " pod="openstack/nova-cell1-cell-mapping-pn9t9" Jan 27 20:29:43 crc kubenswrapper[4793]: I0127 20:29:43.985456 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsz6c\" (UniqueName: \"kubernetes.io/projected/584f9fed-a572-4b61-8b51-f1178a6cfa76-kube-api-access-wsz6c\") pod \"nova-cell1-cell-mapping-pn9t9\" (UID: \"584f9fed-a572-4b61-8b51-f1178a6cfa76\") " pod="openstack/nova-cell1-cell-mapping-pn9t9" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.163983 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-pn9t9" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.489999 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.554861 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d75e0794-645e-437f-8553-777f0883e298-config-data\") pod \"d75e0794-645e-437f-8553-777f0883e298\" (UID: \"d75e0794-645e-437f-8553-777f0883e298\") " Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.554935 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d75e0794-645e-437f-8553-777f0883e298-logs\") pod \"d75e0794-645e-437f-8553-777f0883e298\" (UID: \"d75e0794-645e-437f-8553-777f0883e298\") " Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.555035 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d75e0794-645e-437f-8553-777f0883e298-combined-ca-bundle\") pod \"d75e0794-645e-437f-8553-777f0883e298\" (UID: \"d75e0794-645e-437f-8553-777f0883e298\") " Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.555152 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l467w\" (UniqueName: \"kubernetes.io/projected/d75e0794-645e-437f-8553-777f0883e298-kube-api-access-l467w\") pod \"d75e0794-645e-437f-8553-777f0883e298\" (UID: \"d75e0794-645e-437f-8553-777f0883e298\") " Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.564202 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d75e0794-645e-437f-8553-777f0883e298-logs" (OuterVolumeSpecName: "logs") pod "d75e0794-645e-437f-8553-777f0883e298" (UID: "d75e0794-645e-437f-8553-777f0883e298"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.585490 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d75e0794-645e-437f-8553-777f0883e298-kube-api-access-l467w" (OuterVolumeSpecName: "kube-api-access-l467w") pod "d75e0794-645e-437f-8553-777f0883e298" (UID: "d75e0794-645e-437f-8553-777f0883e298"). InnerVolumeSpecName "kube-api-access-l467w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.615166 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d75e0794-645e-437f-8553-777f0883e298-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d75e0794-645e-437f-8553-777f0883e298" (UID: "d75e0794-645e-437f-8553-777f0883e298"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.617362 4793 generic.go:334] "Generic (PLEG): container finished" podID="d75e0794-645e-437f-8553-777f0883e298" containerID="a591e0b22de91d59fd5aac436215d200adfc319ccd040e366e61500c741ac2cc" exitCode=0 Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.617810 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.617904 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d75e0794-645e-437f-8553-777f0883e298","Type":"ContainerDied","Data":"a591e0b22de91d59fd5aac436215d200adfc319ccd040e366e61500c741ac2cc"} Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.622759 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d75e0794-645e-437f-8553-777f0883e298","Type":"ContainerDied","Data":"a8e52973511ddb0f712592328e0077e1388adc0b4f3d9ad462674b22257012bb"} Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.622794 4793 scope.go:117] "RemoveContainer" containerID="a591e0b22de91d59fd5aac436215d200adfc319ccd040e366e61500c741ac2cc" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.657700 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d75e0794-645e-437f-8553-777f0883e298-logs\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.657762 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d75e0794-645e-437f-8553-777f0883e298-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.657772 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l467w\" (UniqueName: \"kubernetes.io/projected/d75e0794-645e-437f-8553-777f0883e298-kube-api-access-l467w\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.676755 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d75e0794-645e-437f-8553-777f0883e298-config-data" (OuterVolumeSpecName: "config-data") pod "d75e0794-645e-437f-8553-777f0883e298" (UID: "d75e0794-645e-437f-8553-777f0883e298"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.727371 4793 scope.go:117] "RemoveContainer" containerID="acce7e97a6c48b77432af0aa29efe1dcf05c2c403f7cbda454626bce370851b8" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.789889 4793 scope.go:117] "RemoveContainer" containerID="a591e0b22de91d59fd5aac436215d200adfc319ccd040e366e61500c741ac2cc" Jan 27 20:29:44 crc kubenswrapper[4793]: E0127 20:29:44.790451 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a591e0b22de91d59fd5aac436215d200adfc319ccd040e366e61500c741ac2cc\": container with ID starting with a591e0b22de91d59fd5aac436215d200adfc319ccd040e366e61500c741ac2cc not found: ID does not exist" containerID="a591e0b22de91d59fd5aac436215d200adfc319ccd040e366e61500c741ac2cc" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.790500 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a591e0b22de91d59fd5aac436215d200adfc319ccd040e366e61500c741ac2cc"} err="failed to get container status \"a591e0b22de91d59fd5aac436215d200adfc319ccd040e366e61500c741ac2cc\": rpc error: code = NotFound desc = could not find container \"a591e0b22de91d59fd5aac436215d200adfc319ccd040e366e61500c741ac2cc\": container with ID starting with a591e0b22de91d59fd5aac436215d200adfc319ccd040e366e61500c741ac2cc not found: ID does not exist" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.790534 4793 scope.go:117] "RemoveContainer" containerID="acce7e97a6c48b77432af0aa29efe1dcf05c2c403f7cbda454626bce370851b8" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.793735 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d75e0794-645e-437f-8553-777f0883e298-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:44 crc kubenswrapper[4793]: E0127 20:29:44.796374 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acce7e97a6c48b77432af0aa29efe1dcf05c2c403f7cbda454626bce370851b8\": container with ID starting with acce7e97a6c48b77432af0aa29efe1dcf05c2c403f7cbda454626bce370851b8 not found: ID does not exist" containerID="acce7e97a6c48b77432af0aa29efe1dcf05c2c403f7cbda454626bce370851b8" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.796419 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acce7e97a6c48b77432af0aa29efe1dcf05c2c403f7cbda454626bce370851b8"} err="failed to get container status \"acce7e97a6c48b77432af0aa29efe1dcf05c2c403f7cbda454626bce370851b8\": rpc error: code = NotFound desc = could not find container \"acce7e97a6c48b77432af0aa29efe1dcf05c2c403f7cbda454626bce370851b8\": container with ID starting with acce7e97a6c48b77432af0aa29efe1dcf05c2c403f7cbda454626bce370851b8 not found: ID does not exist" Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.972689 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 27 20:29:44 crc kubenswrapper[4793]: I0127 20:29:44.987689 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.022201 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 27 20:29:45 crc kubenswrapper[4793]: E0127 20:29:45.022881 4793 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="d75e0794-645e-437f-8553-777f0883e298" containerName="nova-api-log" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.022906 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="d75e0794-645e-437f-8553-777f0883e298" containerName="nova-api-log" Jan 27 20:29:45 crc kubenswrapper[4793]: E0127 20:29:45.022929 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d75e0794-645e-437f-8553-777f0883e298" containerName="nova-api-api" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.022940 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="d75e0794-645e-437f-8553-777f0883e298" containerName="nova-api-api" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.023273 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="d75e0794-645e-437f-8553-777f0883e298" containerName="nova-api-log" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.023303 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="d75e0794-645e-437f-8553-777f0883e298" containerName="nova-api-api" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.024847 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.033209 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.033493 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.033724 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.047357 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.107763 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-config-data\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.107888 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de788f34-7338-4484-b3cb-af96c8e4fa85-logs\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.107963 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-internal-tls-certs\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.108844 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.109379 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pntnp\" (UniqueName: 
\"kubernetes.io/projected/de788f34-7338-4484-b3cb-af96c8e4fa85-kube-api-access-pntnp\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.109423 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-public-tls-certs\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.126373 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-pn9t9"] Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.141699 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.211731 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-config-data\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.211796 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de788f34-7338-4484-b3cb-af96c8e4fa85-logs\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.211846 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-internal-tls-certs\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.211894 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.211928 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pntnp\" (UniqueName: \"kubernetes.io/projected/de788f34-7338-4484-b3cb-af96c8e4fa85-kube-api-access-pntnp\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.211946 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-public-tls-certs\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.212835 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de788f34-7338-4484-b3cb-af96c8e4fa85-logs\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.218910 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-config-data\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.237223 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.244668 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d6b9584f5-6zm4g"] Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.244924 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" podUID="baa7435e-f1cc-4c71-aaed-712fa2d3e4ac" containerName="dnsmasq-dns" containerID="cri-o://076738602b49bc88fc9f86f069a04488a88a980bbbaf538c27c2cddee88d4eeb" gracePeriod=10 Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.247125 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-public-tls-certs\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.260375 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-internal-tls-certs\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.273224 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pntnp\" (UniqueName: \"kubernetes.io/projected/de788f34-7338-4484-b3cb-af96c8e4fa85-kube-api-access-pntnp\") pod \"nova-api-0\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.295176 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dlzk5"] Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.297401 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.316103 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ec9890b-d229-4e98-861a-e9b6ebd814a7-catalog-content\") pod \"community-operators-dlzk5\" (UID: \"4ec9890b-d229-4e98-861a-e9b6ebd814a7\") " pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.316168 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rfkv\" (UniqueName: \"kubernetes.io/projected/4ec9890b-d229-4e98-861a-e9b6ebd814a7-kube-api-access-2rfkv\") pod \"community-operators-dlzk5\" (UID: \"4ec9890b-d229-4e98-861a-e9b6ebd814a7\") " pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.316189 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ec9890b-d229-4e98-861a-e9b6ebd814a7-utilities\") pod \"community-operators-dlzk5\" (UID: \"4ec9890b-d229-4e98-861a-e9b6ebd814a7\") " pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.317217 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dlzk5"] Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.351159 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.657869 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ec9890b-d229-4e98-861a-e9b6ebd814a7-catalog-content\") pod \"community-operators-dlzk5\" (UID: \"4ec9890b-d229-4e98-861a-e9b6ebd814a7\") " pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.657923 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rfkv\" (UniqueName: \"kubernetes.io/projected/4ec9890b-d229-4e98-861a-e9b6ebd814a7-kube-api-access-2rfkv\") pod \"community-operators-dlzk5\" (UID: \"4ec9890b-d229-4e98-861a-e9b6ebd814a7\") " pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.657943 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ec9890b-d229-4e98-861a-e9b6ebd814a7-utilities\") pod \"community-operators-dlzk5\" (UID: \"4ec9890b-d229-4e98-861a-e9b6ebd814a7\") " pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.658716 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ec9890b-d229-4e98-861a-e9b6ebd814a7-utilities\") pod \"community-operators-dlzk5\" (UID: \"4ec9890b-d229-4e98-861a-e9b6ebd814a7\") " pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.659572 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ec9890b-d229-4e98-861a-e9b6ebd814a7-catalog-content\") pod \"community-operators-dlzk5\" (UID: 
\"4ec9890b-d229-4e98-861a-e9b6ebd814a7\") " pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.705174 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rfkv\" (UniqueName: \"kubernetes.io/projected/4ec9890b-d229-4e98-861a-e9b6ebd814a7-kube-api-access-2rfkv\") pod \"community-operators-dlzk5\" (UID: \"4ec9890b-d229-4e98-861a-e9b6ebd814a7\") " pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.740749 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-pn9t9" event={"ID":"584f9fed-a572-4b61-8b51-f1178a6cfa76","Type":"ContainerStarted","Data":"4f536334fa91b2f8a9dbc5fe9ea0370aa5e8f14537343f455c3f6f468154f43d"} Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.791732 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.832000 4793 generic.go:334] "Generic (PLEG): container finished" podID="baa7435e-f1cc-4c71-aaed-712fa2d3e4ac" containerID="076738602b49bc88fc9f86f069a04488a88a980bbbaf538c27c2cddee88d4eeb" exitCode=0 Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.918713 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d75e0794-645e-437f-8553-777f0883e298" path="/var/lib/kubelet/pods/d75e0794-645e-437f-8553-777f0883e298/volumes" Jan 27 20:29:45 crc kubenswrapper[4793]: I0127 20:29:45.922511 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" event={"ID":"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac","Type":"ContainerDied","Data":"076738602b49bc88fc9f86f069a04488a88a980bbbaf538c27c2cddee88d4eeb"} Jan 27 20:29:46 crc kubenswrapper[4793]: I0127 20:29:46.625600 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:29:46 crc kubenswrapper[4793]: I0127 20:29:46.625988 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="ceilometer-central-agent" containerID="cri-o://d880821c4e597a5d30fe8e6f5f1ce7c4bbb61503a944fd2e3258f2f1142b5601" gracePeriod=30 Jan 27 20:29:46 crc kubenswrapper[4793]: I0127 20:29:46.626985 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="ceilometer-notification-agent" containerID="cri-o://a6aaae42a45feb5b51ac383de104d4a87ec7cc29c62ae1b26032c372f34ab3e2" gracePeriod=30 Jan 27 20:29:46 crc kubenswrapper[4793]: I0127 20:29:46.627105 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="sg-core" containerID="cri-o://25033292beadb5903cdd962ebc50659773864d7841e20774ad7dbca8ab60e5d6" gracePeriod=30 Jan 27 20:29:46 crc kubenswrapper[4793]: I0127 20:29:46.626948 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="proxy-httpd" containerID="cri-o://0b23506b449d7d7ec78660c14e7a1a34632c24d8a8f1c978dc27855dee4a7283" gracePeriod=30 Jan 27 20:29:46 crc kubenswrapper[4793]: I0127 20:29:46.657617 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" 
podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 27 20:29:46 crc kubenswrapper[4793]: I0127 20:29:46.865797 4793 generic.go:334] "Generic (PLEG): container finished" podID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerID="25033292beadb5903cdd962ebc50659773864d7841e20774ad7dbca8ab60e5d6" exitCode=2 Jan 27 20:29:46 crc kubenswrapper[4793]: I0127 20:29:46.865910 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b","Type":"ContainerDied","Data":"25033292beadb5903cdd962ebc50659773864d7841e20774ad7dbca8ab60e5d6"} Jan 27 20:29:46 crc kubenswrapper[4793]: I0127 20:29:46.868364 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" event={"ID":"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac","Type":"ContainerDied","Data":"11a240f05aba6fba58cbe79cdba708e539af0a8059d2af9a55f4448fff1a994b"} Jan 27 20:29:46 crc kubenswrapper[4793]: I0127 20:29:46.868391 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="11a240f05aba6fba58cbe79cdba708e539af0a8059d2af9a55f4448fff1a994b" Jan 27 20:29:46 crc kubenswrapper[4793]: I0127 20:29:46.887125 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-pn9t9" event={"ID":"584f9fed-a572-4b61-8b51-f1178a6cfa76","Type":"ContainerStarted","Data":"57a8180be006032de92a7cc9b237d8a8ded708228f98914c0d60febc5fb8fd7c"} Jan 27 20:29:46 crc kubenswrapper[4793]: I0127 20:29:46.898969 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 27 20:29:46 crc kubenswrapper[4793]: I0127 20:29:46.931907 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:29:46 crc kubenswrapper[4793]: I0127 20:29:46.936890 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dlzk5"] Jan 27 20:29:46 crc kubenswrapper[4793]: I0127 20:29:46.945483 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-pn9t9" podStartSLOduration=3.945461988 podStartE2EDuration="3.945461988s" podCreationTimestamp="2026-01-27 20:29:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:29:46.921203684 +0000 UTC m=+1612.311456840" watchObservedRunningTime="2026-01-27 20:29:46.945461988 +0000 UTC m=+1612.335715144" Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.024321 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-dns-svc\") pod \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.024397 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-config\") pod \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.024479 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-ovsdbserver-nb\") pod \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.024528 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glgwp\" (UniqueName: \"kubernetes.io/projected/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-kube-api-access-glgwp\") pod \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.024693 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-ovsdbserver-sb\") pod \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.024738 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-dns-swift-storage-0\") pod \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\" (UID: \"baa7435e-f1cc-4c71-aaed-712fa2d3e4ac\") " Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.076879 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-kube-api-access-glgwp" (OuterVolumeSpecName: "kube-api-access-glgwp") pod "baa7435e-f1cc-4c71-aaed-712fa2d3e4ac" (UID: "baa7435e-f1cc-4c71-aaed-712fa2d3e4ac"). InnerVolumeSpecName "kube-api-access-glgwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.120801 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-config" (OuterVolumeSpecName: "config") pod "baa7435e-f1cc-4c71-aaed-712fa2d3e4ac" (UID: "baa7435e-f1cc-4c71-aaed-712fa2d3e4ac"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.130924 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.130982 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glgwp\" (UniqueName: \"kubernetes.io/projected/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-kube-api-access-glgwp\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.131659 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "baa7435e-f1cc-4c71-aaed-712fa2d3e4ac" (UID: "baa7435e-f1cc-4c71-aaed-712fa2d3e4ac"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.151673 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "baa7435e-f1cc-4c71-aaed-712fa2d3e4ac" (UID: "baa7435e-f1cc-4c71-aaed-712fa2d3e4ac"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.183135 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "baa7435e-f1cc-4c71-aaed-712fa2d3e4ac" (UID: "baa7435e-f1cc-4c71-aaed-712fa2d3e4ac"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.200890 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "baa7435e-f1cc-4c71-aaed-712fa2d3e4ac" (UID: "baa7435e-f1cc-4c71-aaed-712fa2d3e4ac"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.234920 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.234952 4793 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.234965 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.234973 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.905943 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"de788f34-7338-4484-b3cb-af96c8e4fa85","Type":"ContainerStarted","Data":"dbcea712a36bff5b533a5fa70e74a87a187e657c134346f7a8fa43619bd2f07c"} Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.905996 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"de788f34-7338-4484-b3cb-af96c8e4fa85","Type":"ContainerStarted","Data":"8771ab26b7bf8cb8d0728d9c298ac39f9560df089810039e7e6839612bf2ed5d"} Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.906006 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"de788f34-7338-4484-b3cb-af96c8e4fa85","Type":"ContainerStarted","Data":"920fcb8d1d1caefc2499d1a0f62edd2260fbda91cc6cd5dbe2692f20e0e0b77e"} Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.915984 4793 generic.go:334] "Generic (PLEG): container finished" podID="4ec9890b-d229-4e98-861a-e9b6ebd814a7" containerID="b20d7ada05704190bfea5f5f531a289c66d1cd9950bef2b66826b2af8859f475" exitCode=0 Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.916116 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dlzk5" event={"ID":"4ec9890b-d229-4e98-861a-e9b6ebd814a7","Type":"ContainerDied","Data":"b20d7ada05704190bfea5f5f531a289c66d1cd9950bef2b66826b2af8859f475"} Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.916147 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dlzk5" event={"ID":"4ec9890b-d229-4e98-861a-e9b6ebd814a7","Type":"ContainerStarted","Data":"e08b60f30544c6d4ea87de78c93ed5fd1dff3c978332dc56748652ace15a3799"} Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.926373 4793 generic.go:334] "Generic (PLEG): container finished" podID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerID="0b23506b449d7d7ec78660c14e7a1a34632c24d8a8f1c978dc27855dee4a7283" exitCode=0 Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.927458 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b","Type":"ContainerDied","Data":"0b23506b449d7d7ec78660c14e7a1a34632c24d8a8f1c978dc27855dee4a7283"} Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.927542 4793 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d6b9584f5-6zm4g" Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.949403 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.949383081 podStartE2EDuration="3.949383081s" podCreationTimestamp="2026-01-27 20:29:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:29:47.935516873 +0000 UTC m=+1613.325770029" watchObservedRunningTime="2026-01-27 20:29:47.949383081 +0000 UTC m=+1613.339636237" Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.985702 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d6b9584f5-6zm4g"] Jan 27 20:29:47 crc kubenswrapper[4793]: I0127 20:29:47.997389 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-d6b9584f5-6zm4g"] Jan 27 20:29:49 crc kubenswrapper[4793]: I0127 20:29:49.228986 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dlzk5" event={"ID":"4ec9890b-d229-4e98-861a-e9b6ebd814a7","Type":"ContainerStarted","Data":"40be92c79dc15254105ba01a308bd3f3fae2f1e59f59d733b82964714f019697"} Jan 27 20:29:49 crc kubenswrapper[4793]: I0127 20:29:49.832449 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="baa7435e-f1cc-4c71-aaed-712fa2d3e4ac" path="/var/lib/kubelet/pods/baa7435e-f1cc-4c71-aaed-712fa2d3e4ac/volumes" Jan 27 20:29:50 crc kubenswrapper[4793]: I0127 20:29:50.286205 4793 generic.go:334] "Generic (PLEG): container finished" podID="4ec9890b-d229-4e98-861a-e9b6ebd814a7" containerID="40be92c79dc15254105ba01a308bd3f3fae2f1e59f59d733b82964714f019697" exitCode=0 Jan 27 20:29:50 crc kubenswrapper[4793]: I0127 20:29:50.286297 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dlzk5" event={"ID":"4ec9890b-d229-4e98-861a-e9b6ebd814a7","Type":"ContainerDied","Data":"40be92c79dc15254105ba01a308bd3f3fae2f1e59f59d733b82964714f019697"} Jan 27 20:29:50 crc kubenswrapper[4793]: E0127 20:29:50.646169 4793 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1d2cd458_6402_4ad5_9b1a_85ee74eccb2b.slice/crio-a6aaae42a45feb5b51ac383de104d4a87ec7cc29c62ae1b26032c372f34ab3e2.scope\": RecentStats: unable to find data in memory cache]" Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.300588 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dlzk5" event={"ID":"4ec9890b-d229-4e98-861a-e9b6ebd814a7","Type":"ContainerStarted","Data":"0eaea5b441454fa1083e043147cbb468a1b8ab9c7d63e41b5bc3cc9891769e69"} Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.305968 4793 generic.go:334] "Generic (PLEG): container finished" podID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerID="a6aaae42a45feb5b51ac383de104d4a87ec7cc29c62ae1b26032c372f34ab3e2" exitCode=0 Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.306027 4793 generic.go:334] "Generic (PLEG): container finished" podID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerID="d880821c4e597a5d30fe8e6f5f1ce7c4bbb61503a944fd2e3258f2f1142b5601" exitCode=0 Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.306053 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b","Type":"ContainerDied","Data":"a6aaae42a45feb5b51ac383de104d4a87ec7cc29c62ae1b26032c372f34ab3e2"} Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.306104 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b","Type":"ContainerDied","Data":"d880821c4e597a5d30fe8e6f5f1ce7c4bbb61503a944fd2e3258f2f1142b5601"} Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.325540 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dlzk5" podStartSLOduration=3.469078064 podStartE2EDuration="6.325515056s" podCreationTimestamp="2026-01-27 20:29:45 +0000 UTC" firstStartedPulling="2026-01-27 20:29:47.918818339 +0000 UTC m=+1613.309071495" lastFinishedPulling="2026-01-27 20:29:50.775255331 +0000 UTC m=+1616.165508487" observedRunningTime="2026-01-27 20:29:51.321770206 +0000 UTC m=+1616.712023362" watchObservedRunningTime="2026-01-27 20:29:51.325515056 +0000 UTC m=+1616.715768212" Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.641224 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.799362 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-scripts\") pod \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.799422 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-run-httpd\") pod \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.799607 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-combined-ca-bundle\") pod \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.799643 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-config-data\") pod \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.799674 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qnwlx\" (UniqueName: \"kubernetes.io/projected/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-kube-api-access-qnwlx\") pod \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.799780 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-log-httpd\") pod \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.799856 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-ceilometer-tls-certs\") pod \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.799910 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-sg-core-conf-yaml\") pod \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\" (UID: \"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b\") " Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.799969 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" (UID: "1d2cd458-6402-4ad5-9b1a-85ee74eccb2b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.800836 4793 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.801023 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" (UID: "1d2cd458-6402-4ad5-9b1a-85ee74eccb2b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.808079 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-kube-api-access-qnwlx" (OuterVolumeSpecName: "kube-api-access-qnwlx") pod "1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" (UID: "1d2cd458-6402-4ad5-9b1a-85ee74eccb2b"). InnerVolumeSpecName "kube-api-access-qnwlx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.808221 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-scripts" (OuterVolumeSpecName: "scripts") pod "1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" (UID: "1d2cd458-6402-4ad5-9b1a-85ee74eccb2b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.867532 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" (UID: "1d2cd458-6402-4ad5-9b1a-85ee74eccb2b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.896228 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" (UID: "1d2cd458-6402-4ad5-9b1a-85ee74eccb2b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.903530 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.903587 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qnwlx\" (UniqueName: \"kubernetes.io/projected/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-kube-api-access-qnwlx\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.903600 4793 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.903610 4793 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.903620 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.904241 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" (UID: "1d2cd458-6402-4ad5-9b1a-85ee74eccb2b"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:51 crc kubenswrapper[4793]: I0127 20:29:51.935136 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-config-data" (OuterVolumeSpecName: "config-data") pod "1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" (UID: "1d2cd458-6402-4ad5-9b1a-85ee74eccb2b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.135096 4793 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.135124 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.319364 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d2cd458-6402-4ad5-9b1a-85ee74eccb2b","Type":"ContainerDied","Data":"8164baee59fbe2769c8955f5416ac98bee19172703236eadfa31488de5e2237d"} Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.319723 4793 scope.go:117] "RemoveContainer" containerID="0b23506b449d7d7ec78660c14e7a1a34632c24d8a8f1c978dc27855dee4a7283" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.319394 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.347483 4793 scope.go:117] "RemoveContainer" containerID="25033292beadb5903cdd962ebc50659773864d7841e20774ad7dbca8ab60e5d6" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.364966 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.376824 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.385945 4793 scope.go:117] "RemoveContainer" containerID="a6aaae42a45feb5b51ac383de104d4a87ec7cc29c62ae1b26032c372f34ab3e2" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.394469 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:29:52 crc kubenswrapper[4793]: E0127 20:29:52.406961 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="ceilometer-notification-agent" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.406998 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="ceilometer-notification-agent" Jan 27 20:29:52 crc kubenswrapper[4793]: E0127 20:29:52.407041 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="ceilometer-central-agent" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.407052 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="ceilometer-central-agent" Jan 27 20:29:52 crc kubenswrapper[4793]: E0127 20:29:52.407066 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="proxy-httpd" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.407077 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="proxy-httpd" Jan 27 20:29:52 crc kubenswrapper[4793]: E0127 20:29:52.407112 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="sg-core" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.407132 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="sg-core" Jan 27 20:29:52 crc kubenswrapper[4793]: E0127 20:29:52.407171 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="baa7435e-f1cc-4c71-aaed-712fa2d3e4ac" containerName="init" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.407179 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="baa7435e-f1cc-4c71-aaed-712fa2d3e4ac" containerName="init" Jan 27 20:29:52 crc kubenswrapper[4793]: E0127 20:29:52.407207 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="baa7435e-f1cc-4c71-aaed-712fa2d3e4ac" containerName="dnsmasq-dns" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.407216 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="baa7435e-f1cc-4c71-aaed-712fa2d3e4ac" containerName="dnsmasq-dns" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.408097 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="proxy-httpd" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.408132 4793 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="sg-core" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.408172 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="baa7435e-f1cc-4c71-aaed-712fa2d3e4ac" containerName="dnsmasq-dns" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.408202 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="ceilometer-central-agent" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.408221 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" containerName="ceilometer-notification-agent" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.416954 4793 scope.go:117] "RemoveContainer" containerID="d880821c4e597a5d30fe8e6f5f1ce7c4bbb61503a944fd2e3258f2f1142b5601" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.430513 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.430846 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.433284 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.433496 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.434461 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.445107 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/904d3d32-2c98-4e0d-b8e7-6554e661d780-config-data\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.445165 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/904d3d32-2c98-4e0d-b8e7-6554e661d780-log-httpd\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.445219 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/904d3d32-2c98-4e0d-b8e7-6554e661d780-scripts\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.445256 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/904d3d32-2c98-4e0d-b8e7-6554e661d780-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.445386 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/904d3d32-2c98-4e0d-b8e7-6554e661d780-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 
20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.445468 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csnk8\" (UniqueName: \"kubernetes.io/projected/904d3d32-2c98-4e0d-b8e7-6554e661d780-kube-api-access-csnk8\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.445490 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/904d3d32-2c98-4e0d-b8e7-6554e661d780-run-httpd\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.445512 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/904d3d32-2c98-4e0d-b8e7-6554e661d780-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.547352 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/904d3d32-2c98-4e0d-b8e7-6554e661d780-config-data\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.547420 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/904d3d32-2c98-4e0d-b8e7-6554e661d780-log-httpd\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.547459 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/904d3d32-2c98-4e0d-b8e7-6554e661d780-scripts\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.547490 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/904d3d32-2c98-4e0d-b8e7-6554e661d780-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.547603 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/904d3d32-2c98-4e0d-b8e7-6554e661d780-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.547672 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csnk8\" (UniqueName: \"kubernetes.io/projected/904d3d32-2c98-4e0d-b8e7-6554e661d780-kube-api-access-csnk8\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.547696 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/904d3d32-2c98-4e0d-b8e7-6554e661d780-run-httpd\") pod \"ceilometer-0\" (UID: 
\"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.547717 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/904d3d32-2c98-4e0d-b8e7-6554e661d780-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.549080 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/904d3d32-2c98-4e0d-b8e7-6554e661d780-log-httpd\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.549324 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/904d3d32-2c98-4e0d-b8e7-6554e661d780-run-httpd\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.552971 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/904d3d32-2c98-4e0d-b8e7-6554e661d780-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.552994 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/904d3d32-2c98-4e0d-b8e7-6554e661d780-scripts\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.553969 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/904d3d32-2c98-4e0d-b8e7-6554e661d780-config-data\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.554691 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/904d3d32-2c98-4e0d-b8e7-6554e661d780-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.554774 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/904d3d32-2c98-4e0d-b8e7-6554e661d780-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.569407 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csnk8\" (UniqueName: \"kubernetes.io/projected/904d3d32-2c98-4e0d-b8e7-6554e661d780-kube-api-access-csnk8\") pod \"ceilometer-0\" (UID: \"904d3d32-2c98-4e0d-b8e7-6554e661d780\") " pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.753118 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.753171 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.766175 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 20:29:52 crc kubenswrapper[4793]: I0127 20:29:52.804784 4793 scope.go:117] "RemoveContainer" containerID="97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e" Jan 27 20:29:53 crc kubenswrapper[4793]: I0127 20:29:53.281783 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 20:29:53 crc kubenswrapper[4793]: I0127 20:29:53.334753 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d"} Jan 27 20:29:53 crc kubenswrapper[4793]: I0127 20:29:53.336468 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"904d3d32-2c98-4e0d-b8e7-6554e661d780","Type":"ContainerStarted","Data":"92dcdc78ddc2eecbc75d9d93f3536ff59d2cc62c62f1d7825850e2b1217716b8"} Jan 27 20:29:53 crc kubenswrapper[4793]: I0127 20:29:53.833969 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d2cd458-6402-4ad5-9b1a-85ee74eccb2b" path="/var/lib/kubelet/pods/1d2cd458-6402-4ad5-9b1a-85ee74eccb2b/volumes" Jan 27 20:29:54 crc kubenswrapper[4793]: I0127 20:29:54.362848 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"904d3d32-2c98-4e0d-b8e7-6554e661d780","Type":"ContainerStarted","Data":"16365d9897275dae6087c863adb184f07668e9f3024f5ca004f4f6701694e425"} Jan 27 20:29:55 crc kubenswrapper[4793]: I0127 20:29:55.355723 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 27 20:29:55 crc kubenswrapper[4793]: I0127 20:29:55.358976 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 27 20:29:55 crc kubenswrapper[4793]: I0127 20:29:55.408511 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"904d3d32-2c98-4e0d-b8e7-6554e661d780","Type":"ContainerStarted","Data":"af37275f9ee59aaf73c9b6ca8ccca59d46fd3c6cf1d7a5ad961a64638de4fa61"} Jan 27 20:29:55 crc kubenswrapper[4793]: I0127 20:29:55.793648 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:55 crc kubenswrapper[4793]: I0127 20:29:55.794049 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:55 crc kubenswrapper[4793]: I0127 20:29:55.880142 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:56 crc kubenswrapper[4793]: I0127 20:29:56.660192 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="de788f34-7338-4484-b3cb-af96c8e4fa85" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.219:8774/\": context 
deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:29:56 crc kubenswrapper[4793]: I0127 20:29:56.684851 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="de788f34-7338-4484-b3cb-af96c8e4fa85" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.219:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 20:29:56 crc kubenswrapper[4793]: I0127 20:29:56.705273 4793 generic.go:334] "Generic (PLEG): container finished" podID="584f9fed-a572-4b61-8b51-f1178a6cfa76" containerID="57a8180be006032de92a7cc9b237d8a8ded708228f98914c0d60febc5fb8fd7c" exitCode=0 Jan 27 20:29:56 crc kubenswrapper[4793]: I0127 20:29:56.705356 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-pn9t9" event={"ID":"584f9fed-a572-4b61-8b51-f1178a6cfa76","Type":"ContainerDied","Data":"57a8180be006032de92a7cc9b237d8a8ded708228f98914c0d60febc5fb8fd7c"} Jan 27 20:29:56 crc kubenswrapper[4793]: I0127 20:29:56.724780 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"904d3d32-2c98-4e0d-b8e7-6554e661d780","Type":"ContainerStarted","Data":"94a7a007857b13ad14b943022260fb0a75801fe0c7a8a757490e902d4d8ca60d"} Jan 27 20:29:56 crc kubenswrapper[4793]: I0127 20:29:56.818594 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:56 crc kubenswrapper[4793]: I0127 20:29:56.886007 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dlzk5"] Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.260874 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.264832 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.360026 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0" Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.528089 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-pn9t9" Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.683904 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wsz6c\" (UniqueName: \"kubernetes.io/projected/584f9fed-a572-4b61-8b51-f1178a6cfa76-kube-api-access-wsz6c\") pod \"584f9fed-a572-4b61-8b51-f1178a6cfa76\" (UID: \"584f9fed-a572-4b61-8b51-f1178a6cfa76\") " Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.684505 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-combined-ca-bundle\") pod \"584f9fed-a572-4b61-8b51-f1178a6cfa76\" (UID: \"584f9fed-a572-4b61-8b51-f1178a6cfa76\") " Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.685155 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-config-data\") pod \"584f9fed-a572-4b61-8b51-f1178a6cfa76\" (UID: \"584f9fed-a572-4b61-8b51-f1178a6cfa76\") " Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.685256 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-scripts\") pod \"584f9fed-a572-4b61-8b51-f1178a6cfa76\" (UID: \"584f9fed-a572-4b61-8b51-f1178a6cfa76\") " Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.694479 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/584f9fed-a572-4b61-8b51-f1178a6cfa76-kube-api-access-wsz6c" (OuterVolumeSpecName: "kube-api-access-wsz6c") pod "584f9fed-a572-4b61-8b51-f1178a6cfa76" (UID: "584f9fed-a572-4b61-8b51-f1178a6cfa76"). InnerVolumeSpecName "kube-api-access-wsz6c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.695063 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-scripts" (OuterVolumeSpecName: "scripts") pod "584f9fed-a572-4b61-8b51-f1178a6cfa76" (UID: "584f9fed-a572-4b61-8b51-f1178a6cfa76"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.724348 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "584f9fed-a572-4b61-8b51-f1178a6cfa76" (UID: "584f9fed-a572-4b61-8b51-f1178a6cfa76"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.733917 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-config-data" (OuterVolumeSpecName: "config-data") pod "584f9fed-a572-4b61-8b51-f1178a6cfa76" (UID: "584f9fed-a572-4b61-8b51-f1178a6cfa76"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.753654 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-pn9t9" event={"ID":"584f9fed-a572-4b61-8b51-f1178a6cfa76","Type":"ContainerDied","Data":"4f536334fa91b2f8a9dbc5fe9ea0370aa5e8f14537343f455c3f6f468154f43d"} Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.753719 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4f536334fa91b2f8a9dbc5fe9ea0370aa5e8f14537343f455c3f6f468154f43d" Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.753772 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-pn9t9" Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.763084 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-dlzk5" podUID="4ec9890b-d229-4e98-861a-e9b6ebd814a7" containerName="registry-server" containerID="cri-o://0eaea5b441454fa1083e043147cbb468a1b8ab9c7d63e41b5bc3cc9891769e69" gracePeriod=2 Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.763587 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"904d3d32-2c98-4e0d-b8e7-6554e661d780","Type":"ContainerStarted","Data":"aefdf412431b75e5833def4e1fff2f8c0e44410a9ad5e94ec393db4c576677c9"} Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.764066 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.788474 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wsz6c\" (UniqueName: \"kubernetes.io/projected/584f9fed-a572-4b61-8b51-f1178a6cfa76-kube-api-access-wsz6c\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.788528 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.788563 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.788577 4793 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/584f9fed-a572-4b61-8b51-f1178a6cfa76-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.821936 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0" Jan 27 20:29:58 crc kubenswrapper[4793]: I0127 20:29:58.822943 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.913780188 podStartE2EDuration="6.82223169s" podCreationTimestamp="2026-01-27 20:29:52 +0000 UTC" firstStartedPulling="2026-01-27 20:29:53.275131394 +0000 UTC m=+1618.665384550" lastFinishedPulling="2026-01-27 20:29:58.183582896 +0000 UTC m=+1623.573836052" observedRunningTime="2026-01-27 20:29:58.815819649 +0000 UTC m=+1624.206072805" watchObservedRunningTime="2026-01-27 20:29:58.82223169 +0000 UTC m=+1624.212484846" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.000614 4793 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/nova-api-0"] Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.000880 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="de788f34-7338-4484-b3cb-af96c8e4fa85" containerName="nova-api-log" containerID="cri-o://8771ab26b7bf8cb8d0728d9c298ac39f9560df089810039e7e6839612bf2ed5d" gracePeriod=30 Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.001336 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="de788f34-7338-4484-b3cb-af96c8e4fa85" containerName="nova-api-api" containerID="cri-o://dbcea712a36bff5b533a5fa70e74a87a187e657c134346f7a8fa43619bd2f07c" gracePeriod=30 Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.032604 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.033017 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="9c0bda9f-a25e-4618-9a84-a2393d93a9e9" containerName="nova-scheduler-scheduler" containerID="cri-o://a52da84c917d962fd70177dbb08fa298466cf9bfa55dca7bf3903dd1100ecf15" gracePeriod=30 Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.046469 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.048292 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="e6ea88dd-da8a-4657-8785-79b5d30c14e9" containerName="nova-metadata-metadata" containerID="cri-o://893639eaa533e9276b5f8f9d4cfe6aa32461ca5d9e7224615d38299efb3ed1ec" gracePeriod=30 Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.048652 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="e6ea88dd-da8a-4657-8785-79b5d30c14e9" containerName="nova-metadata-log" containerID="cri-o://76d975de65f09e3d1d3dd72caddb10aee288896ba6f01a46cc189dcf041cc962" gracePeriod=30 Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.367447 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.527342 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rfkv\" (UniqueName: \"kubernetes.io/projected/4ec9890b-d229-4e98-861a-e9b6ebd814a7-kube-api-access-2rfkv\") pod \"4ec9890b-d229-4e98-861a-e9b6ebd814a7\" (UID: \"4ec9890b-d229-4e98-861a-e9b6ebd814a7\") " Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.527440 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ec9890b-d229-4e98-861a-e9b6ebd814a7-catalog-content\") pod \"4ec9890b-d229-4e98-861a-e9b6ebd814a7\" (UID: \"4ec9890b-d229-4e98-861a-e9b6ebd814a7\") " Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.527566 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ec9890b-d229-4e98-861a-e9b6ebd814a7-utilities\") pod \"4ec9890b-d229-4e98-861a-e9b6ebd814a7\" (UID: \"4ec9890b-d229-4e98-861a-e9b6ebd814a7\") " Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.528635 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ec9890b-d229-4e98-861a-e9b6ebd814a7-utilities" (OuterVolumeSpecName: "utilities") pod "4ec9890b-d229-4e98-861a-e9b6ebd814a7" (UID: "4ec9890b-d229-4e98-861a-e9b6ebd814a7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.535086 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ec9890b-d229-4e98-861a-e9b6ebd814a7-kube-api-access-2rfkv" (OuterVolumeSpecName: "kube-api-access-2rfkv") pod "4ec9890b-d229-4e98-861a-e9b6ebd814a7" (UID: "4ec9890b-d229-4e98-861a-e9b6ebd814a7"). InnerVolumeSpecName "kube-api-access-2rfkv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.570004 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ec9890b-d229-4e98-861a-e9b6ebd814a7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4ec9890b-d229-4e98-861a-e9b6ebd814a7" (UID: "4ec9890b-d229-4e98-861a-e9b6ebd814a7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.629822 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rfkv\" (UniqueName: \"kubernetes.io/projected/4ec9890b-d229-4e98-861a-e9b6ebd814a7-kube-api-access-2rfkv\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.629870 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ec9890b-d229-4e98-861a-e9b6ebd814a7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.629883 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ec9890b-d229-4e98-861a-e9b6ebd814a7-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:29:59 crc kubenswrapper[4793]: E0127 20:29:59.744046 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a52da84c917d962fd70177dbb08fa298466cf9bfa55dca7bf3903dd1100ecf15" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 27 20:29:59 crc kubenswrapper[4793]: E0127 20:29:59.748826 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a52da84c917d962fd70177dbb08fa298466cf9bfa55dca7bf3903dd1100ecf15" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 27 20:29:59 crc kubenswrapper[4793]: E0127 20:29:59.750623 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a52da84c917d962fd70177dbb08fa298466cf9bfa55dca7bf3903dd1100ecf15" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 27 20:29:59 crc kubenswrapper[4793]: E0127 20:29:59.750725 4793 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="9c0bda9f-a25e-4618-9a84-a2393d93a9e9" containerName="nova-scheduler-scheduler" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.775203 4793 generic.go:334] "Generic (PLEG): container finished" podID="e6ea88dd-da8a-4657-8785-79b5d30c14e9" containerID="76d975de65f09e3d1d3dd72caddb10aee288896ba6f01a46cc189dcf041cc962" exitCode=143 Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.775280 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e6ea88dd-da8a-4657-8785-79b5d30c14e9","Type":"ContainerDied","Data":"76d975de65f09e3d1d3dd72caddb10aee288896ba6f01a46cc189dcf041cc962"} Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.777955 4793 generic.go:334] "Generic (PLEG): container finished" podID="de788f34-7338-4484-b3cb-af96c8e4fa85" containerID="8771ab26b7bf8cb8d0728d9c298ac39f9560df089810039e7e6839612bf2ed5d" exitCode=143 Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.778049 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"de788f34-7338-4484-b3cb-af96c8e4fa85","Type":"ContainerDied","Data":"8771ab26b7bf8cb8d0728d9c298ac39f9560df089810039e7e6839612bf2ed5d"} Jan 27 20:29:59 crc 
kubenswrapper[4793]: I0127 20:29:59.780868 4793 generic.go:334] "Generic (PLEG): container finished" podID="4ec9890b-d229-4e98-861a-e9b6ebd814a7" containerID="0eaea5b441454fa1083e043147cbb468a1b8ab9c7d63e41b5bc3cc9891769e69" exitCode=0 Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.780987 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dlzk5" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.781007 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dlzk5" event={"ID":"4ec9890b-d229-4e98-861a-e9b6ebd814a7","Type":"ContainerDied","Data":"0eaea5b441454fa1083e043147cbb468a1b8ab9c7d63e41b5bc3cc9891769e69"} Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.781040 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dlzk5" event={"ID":"4ec9890b-d229-4e98-861a-e9b6ebd814a7","Type":"ContainerDied","Data":"e08b60f30544c6d4ea87de78c93ed5fd1dff3c978332dc56748652ace15a3799"} Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.781066 4793 scope.go:117] "RemoveContainer" containerID="0eaea5b441454fa1083e043147cbb468a1b8ab9c7d63e41b5bc3cc9891769e69" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.791529 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d" exitCode=1 Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.791928 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d"} Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.792890 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d" Jan 27 20:29:59 crc kubenswrapper[4793]: E0127 20:29:59.793406 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.870948 4793 scope.go:117] "RemoveContainer" containerID="40be92c79dc15254105ba01a308bd3f3fae2f1e59f59d733b82964714f019697" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.878302 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dlzk5"] Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.887715 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-dlzk5"] Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.919137 4793 scope.go:117] "RemoveContainer" containerID="b20d7ada05704190bfea5f5f531a289c66d1cd9950bef2b66826b2af8859f475" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.973927 4793 scope.go:117] "RemoveContainer" containerID="0eaea5b441454fa1083e043147cbb468a1b8ab9c7d63e41b5bc3cc9891769e69" Jan 27 20:29:59 crc kubenswrapper[4793]: E0127 20:29:59.974453 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0eaea5b441454fa1083e043147cbb468a1b8ab9c7d63e41b5bc3cc9891769e69\": 
container with ID starting with 0eaea5b441454fa1083e043147cbb468a1b8ab9c7d63e41b5bc3cc9891769e69 not found: ID does not exist" containerID="0eaea5b441454fa1083e043147cbb468a1b8ab9c7d63e41b5bc3cc9891769e69" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.974507 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0eaea5b441454fa1083e043147cbb468a1b8ab9c7d63e41b5bc3cc9891769e69"} err="failed to get container status \"0eaea5b441454fa1083e043147cbb468a1b8ab9c7d63e41b5bc3cc9891769e69\": rpc error: code = NotFound desc = could not find container \"0eaea5b441454fa1083e043147cbb468a1b8ab9c7d63e41b5bc3cc9891769e69\": container with ID starting with 0eaea5b441454fa1083e043147cbb468a1b8ab9c7d63e41b5bc3cc9891769e69 not found: ID does not exist" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.974572 4793 scope.go:117] "RemoveContainer" containerID="40be92c79dc15254105ba01a308bd3f3fae2f1e59f59d733b82964714f019697" Jan 27 20:29:59 crc kubenswrapper[4793]: E0127 20:29:59.975158 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40be92c79dc15254105ba01a308bd3f3fae2f1e59f59d733b82964714f019697\": container with ID starting with 40be92c79dc15254105ba01a308bd3f3fae2f1e59f59d733b82964714f019697 not found: ID does not exist" containerID="40be92c79dc15254105ba01a308bd3f3fae2f1e59f59d733b82964714f019697" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.975207 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40be92c79dc15254105ba01a308bd3f3fae2f1e59f59d733b82964714f019697"} err="failed to get container status \"40be92c79dc15254105ba01a308bd3f3fae2f1e59f59d733b82964714f019697\": rpc error: code = NotFound desc = could not find container \"40be92c79dc15254105ba01a308bd3f3fae2f1e59f59d733b82964714f019697\": container with ID starting with 40be92c79dc15254105ba01a308bd3f3fae2f1e59f59d733b82964714f019697 not found: ID does not exist" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.975237 4793 scope.go:117] "RemoveContainer" containerID="b20d7ada05704190bfea5f5f531a289c66d1cd9950bef2b66826b2af8859f475" Jan 27 20:29:59 crc kubenswrapper[4793]: E0127 20:29:59.975506 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b20d7ada05704190bfea5f5f531a289c66d1cd9950bef2b66826b2af8859f475\": container with ID starting with b20d7ada05704190bfea5f5f531a289c66d1cd9950bef2b66826b2af8859f475 not found: ID does not exist" containerID="b20d7ada05704190bfea5f5f531a289c66d1cd9950bef2b66826b2af8859f475" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.975788 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b20d7ada05704190bfea5f5f531a289c66d1cd9950bef2b66826b2af8859f475"} err="failed to get container status \"b20d7ada05704190bfea5f5f531a289c66d1cd9950bef2b66826b2af8859f475\": rpc error: code = NotFound desc = could not find container \"b20d7ada05704190bfea5f5f531a289c66d1cd9950bef2b66826b2af8859f475\": container with ID starting with b20d7ada05704190bfea5f5f531a289c66d1cd9950bef2b66826b2af8859f475 not found: ID does not exist" Jan 27 20:29:59 crc kubenswrapper[4793]: I0127 20:29:59.975807 4793 scope.go:117] "RemoveContainer" containerID="97d0960ecfa0822b51ddc9e815fe249a75b7d254c05a21787effc3292e3a768e" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.166783 4793 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7"] Jan 27 20:30:00 crc kubenswrapper[4793]: E0127 20:30:00.167414 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ec9890b-d229-4e98-861a-e9b6ebd814a7" containerName="extract-utilities" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.167431 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ec9890b-d229-4e98-861a-e9b6ebd814a7" containerName="extract-utilities" Jan 27 20:30:00 crc kubenswrapper[4793]: E0127 20:30:00.167467 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ec9890b-d229-4e98-861a-e9b6ebd814a7" containerName="registry-server" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.167477 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ec9890b-d229-4e98-861a-e9b6ebd814a7" containerName="registry-server" Jan 27 20:30:00 crc kubenswrapper[4793]: E0127 20:30:00.167497 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="584f9fed-a572-4b61-8b51-f1178a6cfa76" containerName="nova-manage" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.167506 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="584f9fed-a572-4b61-8b51-f1178a6cfa76" containerName="nova-manage" Jan 27 20:30:00 crc kubenswrapper[4793]: E0127 20:30:00.167530 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ec9890b-d229-4e98-861a-e9b6ebd814a7" containerName="extract-content" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.167538 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ec9890b-d229-4e98-861a-e9b6ebd814a7" containerName="extract-content" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.167833 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ec9890b-d229-4e98-861a-e9b6ebd814a7" containerName="registry-server" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.167860 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="584f9fed-a572-4b61-8b51-f1178a6cfa76" containerName="nova-manage" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.168903 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.174179 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.174325 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.182464 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7"] Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.346411 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpnmv\" (UniqueName: \"kubernetes.io/projected/61e6d950-0948-4021-8d5b-a8578c9a2326-kube-api-access-dpnmv\") pod \"collect-profiles-29492430-v56k7\" (UID: \"61e6d950-0948-4021-8d5b-a8578c9a2326\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.346847 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/61e6d950-0948-4021-8d5b-a8578c9a2326-secret-volume\") pod \"collect-profiles-29492430-v56k7\" (UID: \"61e6d950-0948-4021-8d5b-a8578c9a2326\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.346991 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/61e6d950-0948-4021-8d5b-a8578c9a2326-config-volume\") pod \"collect-profiles-29492430-v56k7\" (UID: \"61e6d950-0948-4021-8d5b-a8578c9a2326\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.448908 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpnmv\" (UniqueName: \"kubernetes.io/projected/61e6d950-0948-4021-8d5b-a8578c9a2326-kube-api-access-dpnmv\") pod \"collect-profiles-29492430-v56k7\" (UID: \"61e6d950-0948-4021-8d5b-a8578c9a2326\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.450096 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/61e6d950-0948-4021-8d5b-a8578c9a2326-secret-volume\") pod \"collect-profiles-29492430-v56k7\" (UID: \"61e6d950-0948-4021-8d5b-a8578c9a2326\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.450233 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/61e6d950-0948-4021-8d5b-a8578c9a2326-config-volume\") pod \"collect-profiles-29492430-v56k7\" (UID: \"61e6d950-0948-4021-8d5b-a8578c9a2326\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.451407 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/61e6d950-0948-4021-8d5b-a8578c9a2326-config-volume\") pod 
\"collect-profiles-29492430-v56k7\" (UID: \"61e6d950-0948-4021-8d5b-a8578c9a2326\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.468055 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/61e6d950-0948-4021-8d5b-a8578c9a2326-secret-volume\") pod \"collect-profiles-29492430-v56k7\" (UID: \"61e6d950-0948-4021-8d5b-a8578c9a2326\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.468688 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpnmv\" (UniqueName: \"kubernetes.io/projected/61e6d950-0948-4021-8d5b-a8578c9a2326-kube-api-access-dpnmv\") pod \"collect-profiles-29492430-v56k7\" (UID: \"61e6d950-0948-4021-8d5b-a8578c9a2326\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.496132 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.812794 4793 generic.go:334] "Generic (PLEG): container finished" podID="e6ea88dd-da8a-4657-8785-79b5d30c14e9" containerID="893639eaa533e9276b5f8f9d4cfe6aa32461ca5d9e7224615d38299efb3ed1ec" exitCode=0 Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.812989 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e6ea88dd-da8a-4657-8785-79b5d30c14e9","Type":"ContainerDied","Data":"893639eaa533e9276b5f8f9d4cfe6aa32461ca5d9e7224615d38299efb3ed1ec"} Jan 27 20:30:00 crc kubenswrapper[4793]: I0127 20:30:00.825180 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d" Jan 27 20:30:00 crc kubenswrapper[4793]: E0127 20:30:00.825507 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:30:01 crc kubenswrapper[4793]: I0127 20:30:01.036336 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7"] Jan 27 20:30:01 crc kubenswrapper[4793]: W0127 20:30:01.037103 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod61e6d950_0948_4021_8d5b_a8578c9a2326.slice/crio-69cf741b0b2a7f5e0f0e4c866b14787192ce5c48d6e5bfda9dae65ce487f6594 WatchSource:0}: Error finding container 69cf741b0b2a7f5e0f0e4c866b14787192ce5c48d6e5bfda9dae65ce487f6594: Status 404 returned error can't find the container with id 69cf741b0b2a7f5e0f0e4c866b14787192ce5c48d6e5bfda9dae65ce487f6594 Jan 27 20:30:01 crc kubenswrapper[4793]: I0127 20:30:01.583278 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 20:30:01 crc kubenswrapper[4793]: I0127 20:30:01.686371 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6ea88dd-da8a-4657-8785-79b5d30c14e9-logs\") pod \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " Jan 27 20:30:01 crc kubenswrapper[4793]: I0127 20:30:01.686672 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9cz4\" (UniqueName: \"kubernetes.io/projected/e6ea88dd-da8a-4657-8785-79b5d30c14e9-kube-api-access-t9cz4\") pod \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " Jan 27 20:30:01 crc kubenswrapper[4793]: I0127 20:30:01.686707 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-nova-metadata-tls-certs\") pod \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " Jan 27 20:30:01 crc kubenswrapper[4793]: I0127 20:30:01.686749 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-config-data\") pod \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " Jan 27 20:30:01 crc kubenswrapper[4793]: I0127 20:30:01.686788 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-combined-ca-bundle\") pod \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\" (UID: \"e6ea88dd-da8a-4657-8785-79b5d30c14e9\") " Jan 27 20:30:01 crc kubenswrapper[4793]: I0127 20:30:01.689276 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6ea88dd-da8a-4657-8785-79b5d30c14e9-logs" (OuterVolumeSpecName: "logs") pod "e6ea88dd-da8a-4657-8785-79b5d30c14e9" (UID: "e6ea88dd-da8a-4657-8785-79b5d30c14e9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:30:01 crc kubenswrapper[4793]: I0127 20:30:01.698723 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6ea88dd-da8a-4657-8785-79b5d30c14e9-kube-api-access-t9cz4" (OuterVolumeSpecName: "kube-api-access-t9cz4") pod "e6ea88dd-da8a-4657-8785-79b5d30c14e9" (UID: "e6ea88dd-da8a-4657-8785-79b5d30c14e9"). InnerVolumeSpecName "kube-api-access-t9cz4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.791971 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6ea88dd-da8a-4657-8785-79b5d30c14e9-logs\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.792290 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9cz4\" (UniqueName: \"kubernetes.io/projected/e6ea88dd-da8a-4657-8785-79b5d30c14e9-kube-api-access-t9cz4\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.792723 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-config-data" (OuterVolumeSpecName: "config-data") pod "e6ea88dd-da8a-4657-8785-79b5d30c14e9" (UID: "e6ea88dd-da8a-4657-8785-79b5d30c14e9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.793437 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e6ea88dd-da8a-4657-8785-79b5d30c14e9" (UID: "e6ea88dd-da8a-4657-8785-79b5d30c14e9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.846775 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ec9890b-d229-4e98-861a-e9b6ebd814a7" path="/var/lib/kubelet/pods/4ec9890b-d229-4e98-861a-e9b6ebd814a7/volumes" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.847950 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.856031 4793 generic.go:334] "Generic (PLEG): container finished" podID="9c0bda9f-a25e-4618-9a84-a2393d93a9e9" containerID="a52da84c917d962fd70177dbb08fa298466cf9bfa55dca7bf3903dd1100ecf15" exitCode=0 Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.856093 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9c0bda9f-a25e-4618-9a84-a2393d93a9e9","Type":"ContainerDied","Data":"a52da84c917d962fd70177dbb08fa298466cf9bfa55dca7bf3903dd1100ecf15"} Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.856119 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9c0bda9f-a25e-4618-9a84-a2393d93a9e9","Type":"ContainerDied","Data":"fd142dc22f11af15c62d165bf7d7ed8799d002e6d23f4ee3753825d7c132584b"} Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.856136 4793 scope.go:117] "RemoveContainer" containerID="a52da84c917d962fd70177dbb08fa298466cf9bfa55dca7bf3903dd1100ecf15" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.856141 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.876537 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" event={"ID":"61e6d950-0948-4021-8d5b-a8578c9a2326","Type":"ContainerStarted","Data":"e48ded758c71fd0e185eb1d22ae5c95425af9d7111522306ecb097d7bab7889f"} Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.876609 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" event={"ID":"61e6d950-0948-4021-8d5b-a8578c9a2326","Type":"ContainerStarted","Data":"69cf741b0b2a7f5e0f0e4c866b14787192ce5c48d6e5bfda9dae65ce487f6594"} Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.879484 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e6ea88dd-da8a-4657-8785-79b5d30c14e9","Type":"ContainerDied","Data":"1cf4822ba61f1d0784df7b576acbe2eb039fead86d951eadb3eb35622fc7a416"} Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.881995 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.897824 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pqlg9\" (UniqueName: \"kubernetes.io/projected/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-kube-api-access-pqlg9\") pod \"9c0bda9f-a25e-4618-9a84-a2393d93a9e9\" (UID: \"9c0bda9f-a25e-4618-9a84-a2393d93a9e9\") " Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.897916 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-combined-ca-bundle\") pod \"9c0bda9f-a25e-4618-9a84-a2393d93a9e9\" (UID: \"9c0bda9f-a25e-4618-9a84-a2393d93a9e9\") " Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.898023 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-config-data\") pod \"9c0bda9f-a25e-4618-9a84-a2393d93a9e9\" (UID: \"9c0bda9f-a25e-4618-9a84-a2393d93a9e9\") " Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.898994 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.899013 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.908814 4793 scope.go:117] "RemoveContainer" containerID="a52da84c917d962fd70177dbb08fa298466cf9bfa55dca7bf3903dd1100ecf15" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.917166 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-kube-api-access-pqlg9" (OuterVolumeSpecName: "kube-api-access-pqlg9") pod "9c0bda9f-a25e-4618-9a84-a2393d93a9e9" (UID: "9c0bda9f-a25e-4618-9a84-a2393d93a9e9"). InnerVolumeSpecName "kube-api-access-pqlg9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.927147 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" podStartSLOduration=1.927103274 podStartE2EDuration="1.927103274s" podCreationTimestamp="2026-01-27 20:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:30:01.91002428 +0000 UTC m=+1627.300277436" watchObservedRunningTime="2026-01-27 20:30:01.927103274 +0000 UTC m=+1627.317356430" Jan 27 20:30:02 crc kubenswrapper[4793]: E0127 20:30:01.932265 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a52da84c917d962fd70177dbb08fa298466cf9bfa55dca7bf3903dd1100ecf15\": container with ID starting with a52da84c917d962fd70177dbb08fa298466cf9bfa55dca7bf3903dd1100ecf15 not found: ID does not exist" containerID="a52da84c917d962fd70177dbb08fa298466cf9bfa55dca7bf3903dd1100ecf15" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.932329 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a52da84c917d962fd70177dbb08fa298466cf9bfa55dca7bf3903dd1100ecf15"} err="failed to get container status \"a52da84c917d962fd70177dbb08fa298466cf9bfa55dca7bf3903dd1100ecf15\": rpc error: code = NotFound desc = could not find container \"a52da84c917d962fd70177dbb08fa298466cf9bfa55dca7bf3903dd1100ecf15\": container with ID starting with a52da84c917d962fd70177dbb08fa298466cf9bfa55dca7bf3903dd1100ecf15 not found: ID does not exist" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.932354 4793 scope.go:117] "RemoveContainer" containerID="893639eaa533e9276b5f8f9d4cfe6aa32461ca5d9e7224615d38299efb3ed1ec" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.935825 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "e6ea88dd-da8a-4657-8785-79b5d30c14e9" (UID: "e6ea88dd-da8a-4657-8785-79b5d30c14e9"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.950023 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9c0bda9f-a25e-4618-9a84-a2393d93a9e9" (UID: "9c0bda9f-a25e-4618-9a84-a2393d93a9e9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:01.960812 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-config-data" (OuterVolumeSpecName: "config-data") pod "9c0bda9f-a25e-4618-9a84-a2393d93a9e9" (UID: "9c0bda9f-a25e-4618-9a84-a2393d93a9e9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.001035 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.001068 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.001079 4793 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6ea88dd-da8a-4657-8785-79b5d30c14e9-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.001089 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pqlg9\" (UniqueName: \"kubernetes.io/projected/9c0bda9f-a25e-4618-9a84-a2393d93a9e9-kube-api-access-pqlg9\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.022873 4793 scope.go:117] "RemoveContainer" containerID="76d975de65f09e3d1d3dd72caddb10aee288896ba6f01a46cc189dcf041cc962" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.228741 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.242686 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.266766 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 20:30:02 crc kubenswrapper[4793]: E0127 20:30:02.267360 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c0bda9f-a25e-4618-9a84-a2393d93a9e9" containerName="nova-scheduler-scheduler" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.267378 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c0bda9f-a25e-4618-9a84-a2393d93a9e9" containerName="nova-scheduler-scheduler" Jan 27 20:30:02 crc kubenswrapper[4793]: E0127 20:30:02.267412 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6ea88dd-da8a-4657-8785-79b5d30c14e9" containerName="nova-metadata-log" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.267420 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6ea88dd-da8a-4657-8785-79b5d30c14e9" containerName="nova-metadata-log" Jan 27 20:30:02 crc kubenswrapper[4793]: E0127 20:30:02.267439 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6ea88dd-da8a-4657-8785-79b5d30c14e9" containerName="nova-metadata-metadata" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.267449 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6ea88dd-da8a-4657-8785-79b5d30c14e9" containerName="nova-metadata-metadata" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.267717 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c0bda9f-a25e-4618-9a84-a2393d93a9e9" containerName="nova-scheduler-scheduler" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.267752 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6ea88dd-da8a-4657-8785-79b5d30c14e9" containerName="nova-metadata-metadata" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.267769 4793 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="e6ea88dd-da8a-4657-8785-79b5d30c14e9" containerName="nova-metadata-log" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.268941 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.271689 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.312050 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.313535 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45849ced-f655-4b51-a545-b8fd1c0e3d09-config-data\") pod \"nova-scheduler-0\" (UID: \"45849ced-f655-4b51-a545-b8fd1c0e3d09\") " pod="openstack/nova-scheduler-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.313632 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45849ced-f655-4b51-a545-b8fd1c0e3d09-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"45849ced-f655-4b51-a545-b8fd1c0e3d09\") " pod="openstack/nova-scheduler-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.313849 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cn9bk\" (UniqueName: \"kubernetes.io/projected/45849ced-f655-4b51-a545-b8fd1c0e3d09-kube-api-access-cn9bk\") pod \"nova-scheduler-0\" (UID: \"45849ced-f655-4b51-a545-b8fd1c0e3d09\") " pod="openstack/nova-scheduler-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.343709 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.368654 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.384230 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.386193 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.389361 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.389953 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.410802 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.415681 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cn9bk\" (UniqueName: \"kubernetes.io/projected/45849ced-f655-4b51-a545-b8fd1c0e3d09-kube-api-access-cn9bk\") pod \"nova-scheduler-0\" (UID: \"45849ced-f655-4b51-a545-b8fd1c0e3d09\") " pod="openstack/nova-scheduler-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.415726 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45849ced-f655-4b51-a545-b8fd1c0e3d09-config-data\") pod \"nova-scheduler-0\" (UID: \"45849ced-f655-4b51-a545-b8fd1c0e3d09\") " pod="openstack/nova-scheduler-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.416331 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45849ced-f655-4b51-a545-b8fd1c0e3d09-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"45849ced-f655-4b51-a545-b8fd1c0e3d09\") " pod="openstack/nova-scheduler-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.424503 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45849ced-f655-4b51-a545-b8fd1c0e3d09-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"45849ced-f655-4b51-a545-b8fd1c0e3d09\") " pod="openstack/nova-scheduler-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.425149 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45849ced-f655-4b51-a545-b8fd1c0e3d09-config-data\") pod \"nova-scheduler-0\" (UID: \"45849ced-f655-4b51-a545-b8fd1c0e3d09\") " pod="openstack/nova-scheduler-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.436267 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cn9bk\" (UniqueName: \"kubernetes.io/projected/45849ced-f655-4b51-a545-b8fd1c0e3d09-kube-api-access-cn9bk\") pod \"nova-scheduler-0\" (UID: \"45849ced-f655-4b51-a545-b8fd1c0e3d09\") " pod="openstack/nova-scheduler-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.518135 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/25954f87-0c42-46c3-abb5-5ec9932665a9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"25954f87-0c42-46c3-abb5-5ec9932665a9\") " pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.518186 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhdrp\" (UniqueName: \"kubernetes.io/projected/25954f87-0c42-46c3-abb5-5ec9932665a9-kube-api-access-bhdrp\") pod \"nova-metadata-0\" (UID: \"25954f87-0c42-46c3-abb5-5ec9932665a9\") " pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc 
kubenswrapper[4793]: I0127 20:30:02.518212 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25954f87-0c42-46c3-abb5-5ec9932665a9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"25954f87-0c42-46c3-abb5-5ec9932665a9\") " pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.518273 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/25954f87-0c42-46c3-abb5-5ec9932665a9-logs\") pod \"nova-metadata-0\" (UID: \"25954f87-0c42-46c3-abb5-5ec9932665a9\") " pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.518350 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25954f87-0c42-46c3-abb5-5ec9932665a9-config-data\") pod \"nova-metadata-0\" (UID: \"25954f87-0c42-46c3-abb5-5ec9932665a9\") " pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.619484 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.620441 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25954f87-0c42-46c3-abb5-5ec9932665a9-config-data\") pod \"nova-metadata-0\" (UID: \"25954f87-0c42-46c3-abb5-5ec9932665a9\") " pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.621443 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/25954f87-0c42-46c3-abb5-5ec9932665a9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"25954f87-0c42-46c3-abb5-5ec9932665a9\") " pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.621502 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhdrp\" (UniqueName: \"kubernetes.io/projected/25954f87-0c42-46c3-abb5-5ec9932665a9-kube-api-access-bhdrp\") pod \"nova-metadata-0\" (UID: \"25954f87-0c42-46c3-abb5-5ec9932665a9\") " pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.621537 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25954f87-0c42-46c3-abb5-5ec9932665a9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"25954f87-0c42-46c3-abb5-5ec9932665a9\") " pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.621622 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/25954f87-0c42-46c3-abb5-5ec9932665a9-logs\") pod \"nova-metadata-0\" (UID: \"25954f87-0c42-46c3-abb5-5ec9932665a9\") " pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.621997 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/25954f87-0c42-46c3-abb5-5ec9932665a9-logs\") pod \"nova-metadata-0\" (UID: \"25954f87-0c42-46c3-abb5-5ec9932665a9\") " pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.625188 4793 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25954f87-0c42-46c3-abb5-5ec9932665a9-config-data\") pod \"nova-metadata-0\" (UID: \"25954f87-0c42-46c3-abb5-5ec9932665a9\") " pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.631246 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25954f87-0c42-46c3-abb5-5ec9932665a9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"25954f87-0c42-46c3-abb5-5ec9932665a9\") " pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.644873 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhdrp\" (UniqueName: \"kubernetes.io/projected/25954f87-0c42-46c3-abb5-5ec9932665a9-kube-api-access-bhdrp\") pod \"nova-metadata-0\" (UID: \"25954f87-0c42-46c3-abb5-5ec9932665a9\") " pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.646079 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/25954f87-0c42-46c3-abb5-5ec9932665a9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"25954f87-0c42-46c3-abb5-5ec9932665a9\") " pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.720977 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.922290 4793 generic.go:334] "Generic (PLEG): container finished" podID="61e6d950-0948-4021-8d5b-a8578c9a2326" containerID="e48ded758c71fd0e185eb1d22ae5c95425af9d7111522306ecb097d7bab7889f" exitCode=0 Jan 27 20:30:02 crc kubenswrapper[4793]: I0127 20:30:02.922736 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" event={"ID":"61e6d950-0948-4021-8d5b-a8578c9a2326","Type":"ContainerDied","Data":"e48ded758c71fd0e185eb1d22ae5c95425af9d7111522306ecb097d7bab7889f"} Jan 27 20:30:03 crc kubenswrapper[4793]: I0127 20:30:03.193786 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 20:30:03 crc kubenswrapper[4793]: I0127 20:30:03.243140 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 20:30:03 crc kubenswrapper[4793]: I0127 20:30:03.244495 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d" Jan 27 20:30:03 crc kubenswrapper[4793]: E0127 20:30:03.244832 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:30:03 crc kubenswrapper[4793]: I0127 20:30:03.312074 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 20:30:03 crc kubenswrapper[4793]: W0127 20:30:03.322311 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod25954f87_0c42_46c3_abb5_5ec9932665a9.slice/crio-c52718854c900bf5175ba75ae7b26e491c18045a9a9eb2305b186046b7b4d3d7 WatchSource:0}: Error finding container 
c52718854c900bf5175ba75ae7b26e491c18045a9a9eb2305b186046b7b4d3d7: Status 404 returned error can't find the container with id c52718854c900bf5175ba75ae7b26e491c18045a9a9eb2305b186046b7b4d3d7 Jan 27 20:30:03 crc kubenswrapper[4793]: I0127 20:30:03.821669 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c0bda9f-a25e-4618-9a84-a2393d93a9e9" path="/var/lib/kubelet/pods/9c0bda9f-a25e-4618-9a84-a2393d93a9e9/volumes" Jan 27 20:30:03 crc kubenswrapper[4793]: I0127 20:30:03.822637 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6ea88dd-da8a-4657-8785-79b5d30c14e9" path="/var/lib/kubelet/pods/e6ea88dd-da8a-4657-8785-79b5d30c14e9/volumes" Jan 27 20:30:03 crc kubenswrapper[4793]: I0127 20:30:03.944655 4793 generic.go:334] "Generic (PLEG): container finished" podID="de788f34-7338-4484-b3cb-af96c8e4fa85" containerID="dbcea712a36bff5b533a5fa70e74a87a187e657c134346f7a8fa43619bd2f07c" exitCode=0 Jan 27 20:30:03 crc kubenswrapper[4793]: I0127 20:30:03.944726 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"de788f34-7338-4484-b3cb-af96c8e4fa85","Type":"ContainerDied","Data":"dbcea712a36bff5b533a5fa70e74a87a187e657c134346f7a8fa43619bd2f07c"} Jan 27 20:30:03 crc kubenswrapper[4793]: I0127 20:30:03.946453 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"45849ced-f655-4b51-a545-b8fd1c0e3d09","Type":"ContainerStarted","Data":"a4263d8a7e9087eefeab64ca7c89e59aadac686988abcaaede6aa47d6ec5157c"} Jan 27 20:30:03 crc kubenswrapper[4793]: I0127 20:30:03.946480 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"45849ced-f655-4b51-a545-b8fd1c0e3d09","Type":"ContainerStarted","Data":"c821b17058179cee3142ea05db7db98123dafd4c017b84d48949357f849d4246"} Jan 27 20:30:03 crc kubenswrapper[4793]: I0127 20:30:03.949946 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"25954f87-0c42-46c3-abb5-5ec9932665a9","Type":"ContainerStarted","Data":"4ad23100aa9b258779529d122b3d821f4bd4a428f8f300eb606353901de9ea83"} Jan 27 20:30:03 crc kubenswrapper[4793]: I0127 20:30:03.949996 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"25954f87-0c42-46c3-abb5-5ec9932665a9","Type":"ContainerStarted","Data":"2bbf93c09fad86cedc37a46eff842cbca1f7303b8e467ae4b9ea23d43662863d"} Jan 27 20:30:03 crc kubenswrapper[4793]: I0127 20:30:03.950007 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"25954f87-0c42-46c3-abb5-5ec9932665a9","Type":"ContainerStarted","Data":"c52718854c900bf5175ba75ae7b26e491c18045a9a9eb2305b186046b7b4d3d7"} Jan 27 20:30:03 crc kubenswrapper[4793]: I0127 20:30:03.985603 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.9855783169999999 podStartE2EDuration="1.985578317s" podCreationTimestamp="2026-01-27 20:30:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:30:03.966958501 +0000 UTC m=+1629.357211657" watchObservedRunningTime="2026-01-27 20:30:03.985578317 +0000 UTC m=+1629.375831473" Jan 27 20:30:03 crc kubenswrapper[4793]: I0127 20:30:03.998999 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.9989721120000001 
podStartE2EDuration="1.998972112s" podCreationTimestamp="2026-01-27 20:30:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:30:03.992429968 +0000 UTC m=+1629.382683144" watchObservedRunningTime="2026-01-27 20:30:03.998972112 +0000 UTC m=+1629.389225268" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.416246 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.479676 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/61e6d950-0948-4021-8d5b-a8578c9a2326-config-volume\") pod \"61e6d950-0948-4021-8d5b-a8578c9a2326\" (UID: \"61e6d950-0948-4021-8d5b-a8578c9a2326\") " Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.483139 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61e6d950-0948-4021-8d5b-a8578c9a2326-config-volume" (OuterVolumeSpecName: "config-volume") pod "61e6d950-0948-4021-8d5b-a8578c9a2326" (UID: "61e6d950-0948-4021-8d5b-a8578c9a2326"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.486326 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/61e6d950-0948-4021-8d5b-a8578c9a2326-secret-volume\") pod \"61e6d950-0948-4021-8d5b-a8578c9a2326\" (UID: \"61e6d950-0948-4021-8d5b-a8578c9a2326\") " Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.491462 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dpnmv\" (UniqueName: \"kubernetes.io/projected/61e6d950-0948-4021-8d5b-a8578c9a2326-kube-api-access-dpnmv\") pod \"61e6d950-0948-4021-8d5b-a8578c9a2326\" (UID: \"61e6d950-0948-4021-8d5b-a8578c9a2326\") " Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.492463 4793 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/61e6d950-0948-4021-8d5b-a8578c9a2326-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.525952 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61e6d950-0948-4021-8d5b-a8578c9a2326-kube-api-access-dpnmv" (OuterVolumeSpecName: "kube-api-access-dpnmv") pod "61e6d950-0948-4021-8d5b-a8578c9a2326" (UID: "61e6d950-0948-4021-8d5b-a8578c9a2326"). InnerVolumeSpecName "kube-api-access-dpnmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.538773 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61e6d950-0948-4021-8d5b-a8578c9a2326-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "61e6d950-0948-4021-8d5b-a8578c9a2326" (UID: "61e6d950-0948-4021-8d5b-a8578c9a2326"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.596828 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dpnmv\" (UniqueName: \"kubernetes.io/projected/61e6d950-0948-4021-8d5b-a8578c9a2326-kube-api-access-dpnmv\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.596898 4793 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/61e6d950-0948-4021-8d5b-a8578c9a2326-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.620389 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.703346 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-combined-ca-bundle\") pod \"de788f34-7338-4484-b3cb-af96c8e4fa85\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.703487 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-internal-tls-certs\") pod \"de788f34-7338-4484-b3cb-af96c8e4fa85\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.703590 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pntnp\" (UniqueName: \"kubernetes.io/projected/de788f34-7338-4484-b3cb-af96c8e4fa85-kube-api-access-pntnp\") pod \"de788f34-7338-4484-b3cb-af96c8e4fa85\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.703667 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-public-tls-certs\") pod \"de788f34-7338-4484-b3cb-af96c8e4fa85\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.703713 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-config-data\") pod \"de788f34-7338-4484-b3cb-af96c8e4fa85\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.703752 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de788f34-7338-4484-b3cb-af96c8e4fa85-logs\") pod \"de788f34-7338-4484-b3cb-af96c8e4fa85\" (UID: \"de788f34-7338-4484-b3cb-af96c8e4fa85\") " Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.705028 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de788f34-7338-4484-b3cb-af96c8e4fa85-logs" (OuterVolumeSpecName: "logs") pod "de788f34-7338-4484-b3cb-af96c8e4fa85" (UID: "de788f34-7338-4484-b3cb-af96c8e4fa85"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.735273 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de788f34-7338-4484-b3cb-af96c8e4fa85-kube-api-access-pntnp" (OuterVolumeSpecName: "kube-api-access-pntnp") pod "de788f34-7338-4484-b3cb-af96c8e4fa85" (UID: "de788f34-7338-4484-b3cb-af96c8e4fa85"). InnerVolumeSpecName "kube-api-access-pntnp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.755808 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "de788f34-7338-4484-b3cb-af96c8e4fa85" (UID: "de788f34-7338-4484-b3cb-af96c8e4fa85"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.797398 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-config-data" (OuterVolumeSpecName: "config-data") pod "de788f34-7338-4484-b3cb-af96c8e4fa85" (UID: "de788f34-7338-4484-b3cb-af96c8e4fa85"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.805755 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.805783 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pntnp\" (UniqueName: \"kubernetes.io/projected/de788f34-7338-4484-b3cb-af96c8e4fa85-kube-api-access-pntnp\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.805795 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.805803 4793 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de788f34-7338-4484-b3cb-af96c8e4fa85-logs\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.812628 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "de788f34-7338-4484-b3cb-af96c8e4fa85" (UID: "de788f34-7338-4484-b3cb-af96c8e4fa85"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.827168 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "de788f34-7338-4484-b3cb-af96c8e4fa85" (UID: "de788f34-7338-4484-b3cb-af96c8e4fa85"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.908325 4793 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.908375 4793 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/de788f34-7338-4484-b3cb-af96c8e4fa85-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.963166 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.963145 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7" event={"ID":"61e6d950-0948-4021-8d5b-a8578c9a2326","Type":"ContainerDied","Data":"69cf741b0b2a7f5e0f0e4c866b14787192ce5c48d6e5bfda9dae65ce487f6594"} Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.963332 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="69cf741b0b2a7f5e0f0e4c866b14787192ce5c48d6e5bfda9dae65ce487f6594" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.966289 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"de788f34-7338-4484-b3cb-af96c8e4fa85","Type":"ContainerDied","Data":"920fcb8d1d1caefc2499d1a0f62edd2260fbda91cc6cd5dbe2692f20e0e0b77e"} Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.966343 4793 scope.go:117] "RemoveContainer" containerID="dbcea712a36bff5b533a5fa70e74a87a187e657c134346f7a8fa43619bd2f07c" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.966350 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 27 20:30:04 crc kubenswrapper[4793]: I0127 20:30:04.993670 4793 scope.go:117] "RemoveContainer" containerID="8771ab26b7bf8cb8d0728d9c298ac39f9560df089810039e7e6839612bf2ed5d" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.030633 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.046821 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.059439 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 27 20:30:05 crc kubenswrapper[4793]: E0127 20:30:05.060066 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de788f34-7338-4484-b3cb-af96c8e4fa85" containerName="nova-api-log" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.060091 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="de788f34-7338-4484-b3cb-af96c8e4fa85" containerName="nova-api-log" Jan 27 20:30:05 crc kubenswrapper[4793]: E0127 20:30:05.060115 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61e6d950-0948-4021-8d5b-a8578c9a2326" containerName="collect-profiles" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.060122 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="61e6d950-0948-4021-8d5b-a8578c9a2326" containerName="collect-profiles" Jan 27 20:30:05 crc kubenswrapper[4793]: E0127 20:30:05.060134 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de788f34-7338-4484-b3cb-af96c8e4fa85" containerName="nova-api-api" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.060141 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="de788f34-7338-4484-b3cb-af96c8e4fa85" containerName="nova-api-api" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.060347 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="de788f34-7338-4484-b3cb-af96c8e4fa85" containerName="nova-api-log" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.060380 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="de788f34-7338-4484-b3cb-af96c8e4fa85" containerName="nova-api-api" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.060400 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="61e6d950-0948-4021-8d5b-a8578c9a2326" containerName="collect-profiles" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.061733 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.064704 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.064966 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.065383 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.069081 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.112390 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbkx4\" (UniqueName: \"kubernetes.io/projected/2d5a3a2a-60fe-4c25-9b34-39831787c64d-kube-api-access-cbkx4\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.112685 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d5a3a2a-60fe-4c25-9b34-39831787c64d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.112872 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d5a3a2a-60fe-4c25-9b34-39831787c64d-public-tls-certs\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.112977 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d5a3a2a-60fe-4c25-9b34-39831787c64d-logs\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.113071 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d5a3a2a-60fe-4c25-9b34-39831787c64d-config-data\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.113251 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d5a3a2a-60fe-4c25-9b34-39831787c64d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.215395 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d5a3a2a-60fe-4c25-9b34-39831787c64d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.215599 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d5a3a2a-60fe-4c25-9b34-39831787c64d-public-tls-certs\") pod 
\"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.215680 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d5a3a2a-60fe-4c25-9b34-39831787c64d-logs\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.215773 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d5a3a2a-60fe-4c25-9b34-39831787c64d-config-data\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.215878 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d5a3a2a-60fe-4c25-9b34-39831787c64d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.215939 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbkx4\" (UniqueName: \"kubernetes.io/projected/2d5a3a2a-60fe-4c25-9b34-39831787c64d-kube-api-access-cbkx4\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.216118 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d5a3a2a-60fe-4c25-9b34-39831787c64d-logs\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.220750 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d5a3a2a-60fe-4c25-9b34-39831787c64d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.221117 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d5a3a2a-60fe-4c25-9b34-39831787c64d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.224478 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d5a3a2a-60fe-4c25-9b34-39831787c64d-public-tls-certs\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.224918 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d5a3a2a-60fe-4c25-9b34-39831787c64d-config-data\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.234155 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbkx4\" (UniqueName: \"kubernetes.io/projected/2d5a3a2a-60fe-4c25-9b34-39831787c64d-kube-api-access-cbkx4\") pod \"nova-api-0\" (UID: \"2d5a3a2a-60fe-4c25-9b34-39831787c64d\") " 
pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.383773 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.953644 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de788f34-7338-4484-b3cb-af96c8e4fa85" path="/var/lib/kubelet/pods/de788f34-7338-4484-b3cb-af96c8e4fa85/volumes" Jan 27 20:30:05 crc kubenswrapper[4793]: I0127 20:30:05.988323 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 27 20:30:06 crc kubenswrapper[4793]: I0127 20:30:06.989143 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2d5a3a2a-60fe-4c25-9b34-39831787c64d","Type":"ContainerStarted","Data":"413586743da8393b3e3e0c674119a207742c4aca4f14cf24046fc176b8a0db5f"} Jan 27 20:30:06 crc kubenswrapper[4793]: I0127 20:30:06.989621 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2d5a3a2a-60fe-4c25-9b34-39831787c64d","Type":"ContainerStarted","Data":"a5a60778fa628746bfe05825f3b7385afe9b4165fb1733a1688b17e487c39871"} Jan 27 20:30:06 crc kubenswrapper[4793]: I0127 20:30:06.989634 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2d5a3a2a-60fe-4c25-9b34-39831787c64d","Type":"ContainerStarted","Data":"00e10af3e37a8309a18471d33548f5f6d495474ded9f03f16813afce7624e9af"} Jan 27 20:30:07 crc kubenswrapper[4793]: I0127 20:30:07.019692 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.019660569 podStartE2EDuration="2.019660569s" podCreationTimestamp="2026-01-27 20:30:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:30:07.005110702 +0000 UTC m=+1632.395363858" watchObservedRunningTime="2026-01-27 20:30:07.019660569 +0000 UTC m=+1632.409913725" Jan 27 20:30:07 crc kubenswrapper[4793]: I0127 20:30:07.620012 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 27 20:30:07 crc kubenswrapper[4793]: I0127 20:30:07.722640 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 27 20:30:07 crc kubenswrapper[4793]: I0127 20:30:07.722725 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 27 20:30:08 crc kubenswrapper[4793]: I0127 20:30:08.243300 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:30:08 crc kubenswrapper[4793]: I0127 20:30:08.243358 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:30:08 crc kubenswrapper[4793]: I0127 20:30:08.244245 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d" Jan 27 20:30:08 crc kubenswrapper[4793]: E0127 20:30:08.244667 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:30:12 crc kubenswrapper[4793]: I0127 20:30:12.619922 4793 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 27 20:30:12 crc kubenswrapper[4793]: I0127 20:30:12.658540 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 27 20:30:12 crc kubenswrapper[4793]: I0127 20:30:12.721794 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 27 20:30:12 crc kubenswrapper[4793]: I0127 20:30:12.722257 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 27 20:30:13 crc kubenswrapper[4793]: I0127 20:30:13.125100 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 27 20:30:13 crc kubenswrapper[4793]: I0127 20:30:13.737783 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="25954f87-0c42-46c3-abb5-5ec9932665a9" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.224:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 20:30:13 crc kubenswrapper[4793]: I0127 20:30:13.737783 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="25954f87-0c42-46c3-abb5-5ec9932665a9" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.224:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 20:30:15 crc kubenswrapper[4793]: I0127 20:30:15.385448 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 27 20:30:15 crc kubenswrapper[4793]: I0127 20:30:15.385903 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 27 20:30:16 crc kubenswrapper[4793]: I0127 20:30:16.403732 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2d5a3a2a-60fe-4c25-9b34-39831787c64d" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.225:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 20:30:16 crc kubenswrapper[4793]: I0127 20:30:16.403732 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2d5a3a2a-60fe-4c25-9b34-39831787c64d" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.225:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 20:30:19 crc kubenswrapper[4793]: I0127 20:30:19.804070 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d" Jan 27 20:30:19 crc kubenswrapper[4793]: E0127 20:30:19.804800 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:30:22 crc kubenswrapper[4793]: I0127 20:30:22.727961 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 27 20:30:22 crc kubenswrapper[4793]: I0127 20:30:22.729481 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 27 20:30:22 crc kubenswrapper[4793]: I0127 
20:30:22.733088 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 27 20:30:22 crc kubenswrapper[4793]: I0127 20:30:22.753464 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:30:22 crc kubenswrapper[4793]: I0127 20:30:22.753512 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:30:22 crc kubenswrapper[4793]: I0127 20:30:22.789945 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 27 20:30:23 crc kubenswrapper[4793]: I0127 20:30:23.207314 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 27 20:30:25 crc kubenswrapper[4793]: I0127 20:30:25.397839 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 27 20:30:25 crc kubenswrapper[4793]: I0127 20:30:25.398463 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 27 20:30:25 crc kubenswrapper[4793]: I0127 20:30:25.399191 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 27 20:30:25 crc kubenswrapper[4793]: I0127 20:30:25.426408 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 27 20:30:26 crc kubenswrapper[4793]: I0127 20:30:26.230965 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 27 20:30:26 crc kubenswrapper[4793]: I0127 20:30:26.242515 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 27 20:30:28 crc kubenswrapper[4793]: I0127 20:30:28.257835 4793 scope.go:117] "RemoveContainer" containerID="d254585d3a1638b741163ab1ed4c8c7be458c83042ef63e1ed060e5d1852d0dd" Jan 27 20:30:33 crc kubenswrapper[4793]: I0127 20:30:33.803646 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d" Jan 27 20:30:33 crc kubenswrapper[4793]: E0127 20:30:33.804781 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:30:46 crc kubenswrapper[4793]: I0127 20:30:46.803961 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d" Jan 27 20:30:46 crc kubenswrapper[4793]: E0127 20:30:46.804878 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 
20:30:52 crc kubenswrapper[4793]: I0127 20:30:52.753401 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:30:52 crc kubenswrapper[4793]: I0127 20:30:52.753972 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:30:52 crc kubenswrapper[4793]: I0127 20:30:52.754021 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:30:52 crc kubenswrapper[4793]: I0127 20:30:52.754830 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 20:30:52 crc kubenswrapper[4793]: I0127 20:30:52.754896 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3" gracePeriod=600 Jan 27 20:30:52 crc kubenswrapper[4793]: E0127 20:30:52.875272 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:30:53 crc kubenswrapper[4793]: I0127 20:30:53.530820 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3" exitCode=0 Jan 27 20:30:53 crc kubenswrapper[4793]: I0127 20:30:53.530889 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"} Jan 27 20:30:53 crc kubenswrapper[4793]: I0127 20:30:53.530957 4793 scope.go:117] "RemoveContainer" containerID="fdd2916150f8a42b633c00852950908ff71fd3b561d65e72206d0f902e9390f6" Jan 27 20:30:53 crc kubenswrapper[4793]: I0127 20:30:53.532072 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3" Jan 27 20:30:53 crc kubenswrapper[4793]: E0127 20:30:53.532351 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:30:59 crc kubenswrapper[4793]: I0127 20:30:59.803716 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d" Jan 27 20:30:59 crc kubenswrapper[4793]: E0127 20:30:59.804495 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:31:08 crc kubenswrapper[4793]: I0127 20:31:08.803335 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3" Jan 27 20:31:08 crc kubenswrapper[4793]: E0127 20:31:08.804159 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:31:14 crc kubenswrapper[4793]: I0127 20:31:14.803578 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d" Jan 27 20:31:14 crc kubenswrapper[4793]: E0127 20:31:14.804343 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:31:20 crc kubenswrapper[4793]: I0127 20:31:20.803876 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3" Jan 27 20:31:20 crc kubenswrapper[4793]: E0127 20:31:20.804661 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:31:28 crc kubenswrapper[4793]: I0127 20:31:28.452100 4793 scope.go:117] "RemoveContainer" containerID="6a841516b7c13b7c410bb8772ab161cef66a5a5372bb096a0082970b77e7332d" Jan 27 20:31:28 crc kubenswrapper[4793]: I0127 20:31:28.512962 4793 scope.go:117] "RemoveContainer" containerID="8dc3a754e1e48f0ce5c63319685061290592009e62276a55f05fff31da8b1562" Jan 27 20:31:28 crc kubenswrapper[4793]: I0127 20:31:28.804011 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d" Jan 27 20:31:28 crc kubenswrapper[4793]: E0127 20:31:28.804283 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=watcher-applier 
pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:31:35 crc kubenswrapper[4793]: I0127 20:31:35.810865 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:31:35 crc kubenswrapper[4793]: E0127 20:31:35.811808 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:31:40 crc kubenswrapper[4793]: I0127 20:31:40.803614 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d"
Jan 27 20:31:40 crc kubenswrapper[4793]: E0127 20:31:40.804115 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:31:50 crc kubenswrapper[4793]: I0127 20:31:50.803574 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:31:50 crc kubenswrapper[4793]: E0127 20:31:50.804250 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:31:55 crc kubenswrapper[4793]: I0127 20:31:55.818951 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d"
Jan 27 20:31:55 crc kubenswrapper[4793]: E0127 20:31:55.819895 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:32:03 crc kubenswrapper[4793]: I0127 20:32:03.804098 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:32:03 crc kubenswrapper[4793]: E0127 20:32:03.805024 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:32:09 crc kubenswrapper[4793]: I0127 20:32:09.803833 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d"
Jan 27 20:32:09 crc kubenswrapper[4793]: E0127 20:32:09.804791 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:32:15 crc kubenswrapper[4793]: I0127 20:32:15.817583 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:32:15 crc kubenswrapper[4793]: E0127 20:32:15.818430 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:32:23 crc kubenswrapper[4793]: I0127 20:32:23.803782 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d"
Jan 27 20:32:23 crc kubenswrapper[4793]: E0127 20:32:23.805584 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:32:28 crc kubenswrapper[4793]: I0127 20:32:28.609648 4793 scope.go:117] "RemoveContainer" containerID="f55e52b39dbb515faebaa957e9fc502d437a957b91a622ff5429fbe75873c24b"
Jan 27 20:32:28 crc kubenswrapper[4793]: I0127 20:32:28.663794 4793 scope.go:117] "RemoveContainer" containerID="0135ecd6f39cda93f94f90acd8016130a9af4edd6d14a11a2a21510f0611c6a8"
Jan 27 20:32:28 crc kubenswrapper[4793]: I0127 20:32:28.807174 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:32:28 crc kubenswrapper[4793]: E0127 20:32:28.807689 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:32:34 crc kubenswrapper[4793]: I0127 20:32:34.804886 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d"
Jan 27 20:32:34 crc kubenswrapper[4793]: E0127 20:32:34.806294 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:32:42 crc kubenswrapper[4793]: I0127 20:32:42.804068 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:32:42 crc kubenswrapper[4793]: E0127 20:32:42.804818 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:32:48 crc kubenswrapper[4793]: I0127 20:32:48.804153 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d"
Jan 27 20:32:49 crc kubenswrapper[4793]: I0127 20:32:49.297315 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"}
Jan 27 20:32:52 crc kubenswrapper[4793]: I0127 20:32:52.329304 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2" exitCode=1
Jan 27 20:32:52 crc kubenswrapper[4793]: I0127 20:32:52.329435 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"}
Jan 27 20:32:52 crc kubenswrapper[4793]: I0127 20:32:52.329833 4793 scope.go:117] "RemoveContainer" containerID="eb0f6ace51b062967faa70fdc6acfe993ccb25c1fe6a5cc646f9a7af2aab162d"
Jan 27 20:32:52 crc kubenswrapper[4793]: I0127 20:32:52.330863 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"
Jan 27 20:32:52 crc kubenswrapper[4793]: E0127 20:32:52.331355 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:32:53 crc kubenswrapper[4793]: I0127 20:32:53.243447 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 27 20:32:53 crc kubenswrapper[4793]: I0127 20:32:53.342031 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"
Jan 27 20:32:53 crc kubenswrapper[4793]: E0127 20:32:53.342385 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:32:55 crc kubenswrapper[4793]: I0127 20:32:55.830830 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:32:55 crc kubenswrapper[4793]: E0127 20:32:55.832748 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:32:58 crc kubenswrapper[4793]: I0127 20:32:58.242497 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 20:32:58 crc kubenswrapper[4793]: I0127 20:32:58.243003 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 20:32:58 crc kubenswrapper[4793]: I0127 20:32:58.243051 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 20:32:58 crc kubenswrapper[4793]: I0127 20:32:58.243949 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"
Jan 27 20:32:58 crc kubenswrapper[4793]: E0127 20:32:58.244210 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:33:06 crc kubenswrapper[4793]: I0127 20:33:06.803199 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:33:06 crc kubenswrapper[4793]: E0127 20:33:06.804029 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:33:10 crc kubenswrapper[4793]: I0127 20:33:10.803766 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"
Jan 27 20:33:10 crc kubenswrapper[4793]: E0127 20:33:10.804404 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:33:18 crc kubenswrapper[4793]: I0127 20:33:18.803420 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:33:18 crc kubenswrapper[4793]: E0127 20:33:18.804289 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:33:21 crc kubenswrapper[4793]: I0127 20:33:21.804344 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"
Jan 27 20:33:21 crc kubenswrapper[4793]: E0127 20:33:21.804895 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:33:28 crc kubenswrapper[4793]: I0127 20:33:28.761112 4793 scope.go:117] "RemoveContainer" containerID="797e0c8a0dd5f82b7a4ffe184a06800d3820b7b892ca0745baa7ac10658793f5"
Jan 27 20:33:28 crc kubenswrapper[4793]: I0127 20:33:28.797269 4793 scope.go:117] "RemoveContainer" containerID="c3f58fb8d65ad0d715698cf7349fbd870ec40a6b9c8106589ae11c0ed0c8328e"
Jan 27 20:33:28 crc kubenswrapper[4793]: I0127 20:33:28.822698 4793 scope.go:117] "RemoveContainer" containerID="171f806f43a9c519891fb1a87d398d8a4ec7313a80c4dcaff2917449857ae3ca"
Jan 27 20:33:28 crc kubenswrapper[4793]: I0127 20:33:28.844814 4793 scope.go:117] "RemoveContainer" containerID="8dc8b4f68261a58f0ab83795e3b834111a882ca5c8361ea0cb2370e508af6ae8"
Jan 27 20:33:28 crc kubenswrapper[4793]: I0127 20:33:28.867573 4793 scope.go:117] "RemoveContainer" containerID="9bff0f955bb0a8dfad28900c476d21c26397534e939a6b98377112c3e89c4e5a"
Jan 27 20:33:28 crc kubenswrapper[4793]: I0127 20:33:28.888411 4793 scope.go:117] "RemoveContainer" containerID="b059262a1921cdefdb4e62cf235283090a344a49000a4e4d219675fe55c8c8f5"
Jan 27 20:33:28 crc kubenswrapper[4793]: I0127 20:33:28.915960 4793 scope.go:117] "RemoveContainer" containerID="ddc3ebb1921f3d6d9ae0e98f38b88a4d329efb259009ff3f475c17fe7d1fd149"
Jan 27 20:33:28 crc kubenswrapper[4793]: I0127 20:33:28.938736 4793 scope.go:117] "RemoveContainer" containerID="98563a4eb2a696935e83a51b8bd4a46c15a10a9210e8057d81a360e02c721d48"
Jan 27 20:33:29 crc kubenswrapper[4793]: I0127 20:33:29.127448 4793 scope.go:117] "RemoveContainer" containerID="f9f61f96b9b70889d4ee24b1a5a03d616fbb327ee4e3093ca209f52c490c982c"
Jan 27 20:33:31 crc kubenswrapper[4793]: I0127 20:33:31.803707 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:33:31 crc kubenswrapper[4793]: E0127 20:33:31.804611 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:33:35 crc kubenswrapper[4793]: I0127 20:33:35.814412 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"
Jan 27 20:33:35 crc kubenswrapper[4793]: E0127 20:33:35.815495 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:33:43 crc kubenswrapper[4793]: I0127 20:33:43.804146 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:33:43 crc kubenswrapper[4793]: E0127 20:33:43.804918 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:33:46 crc kubenswrapper[4793]: I0127 20:33:46.802955 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"
Jan 27 20:33:46 crc kubenswrapper[4793]: E0127 20:33:46.803602 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:33:54 crc kubenswrapper[4793]: I0127 20:33:54.803661 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:33:54 crc kubenswrapper[4793]: E0127 20:33:54.804488 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:33:57 crc kubenswrapper[4793]: I0127 20:33:57.804623 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"
Jan 27 20:33:57 crc kubenswrapper[4793]: E0127 20:33:57.805528 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:34:05 crc kubenswrapper[4793]: I0127 20:34:05.810725 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:34:05 crc kubenswrapper[4793]: E0127 20:34:05.811482 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:34:10 crc kubenswrapper[4793]: I0127 20:34:10.803960 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"
Jan 27 20:34:10 crc kubenswrapper[4793]: E0127 20:34:10.804887 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:34:20 crc kubenswrapper[4793]: I0127 20:34:20.803995 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:34:20 crc kubenswrapper[4793]: E0127 20:34:20.804851 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:34:23 crc kubenswrapper[4793]: I0127 20:34:23.047440 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-dtqh6"]
Jan 27 20:34:23 crc kubenswrapper[4793]: I0127 20:34:23.059029 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-dtqh6"]
Jan 27 20:34:23 crc kubenswrapper[4793]: I0127 20:34:23.818763 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c2d66d7-1414-4bc8-9131-9af3080a5c4f" path="/var/lib/kubelet/pods/4c2d66d7-1414-4bc8-9131-9af3080a5c4f/volumes"
Jan 27 20:34:24 crc kubenswrapper[4793]: I0127 20:34:24.036592 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-3d85-account-create-update-qqk7s"]
Jan 27 20:34:24 crc kubenswrapper[4793]: I0127 20:34:24.048536 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-3d85-account-create-update-qqk7s"]
Jan 27 20:34:24 crc kubenswrapper[4793]: I0127 20:34:24.060117 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-f09f-account-create-update-kchl7"]
Jan 27 20:34:24 crc kubenswrapper[4793]: I0127 20:34:24.070907 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-a8bf-account-create-update-jpwvn"]
Jan 27 20:34:24 crc kubenswrapper[4793]: I0127 20:34:24.083944 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-a8bf-account-create-update-jpwvn"]
Jan 27 20:34:24 crc kubenswrapper[4793]: I0127 20:34:24.100838 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-f09f-account-create-update-kchl7"]
Jan 27 20:34:24 crc kubenswrapper[4793]: I0127 20:34:24.117922 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-kbw4s"]
Jan 27 20:34:24 crc kubenswrapper[4793]: I0127 20:34:24.135955 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-wzh4x"]
Jan 27 20:34:24 crc kubenswrapper[4793]: I0127 20:34:24.148328 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-kbw4s"]
Jan 27 20:34:24 crc kubenswrapper[4793]: I0127 20:34:24.160628 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-wzh4x"]
Jan 27 20:34:24 crc kubenswrapper[4793]: I0127 20:34:24.170655 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-create-kth5t"]
Jan 27 20:34:24 crc kubenswrapper[4793]: I0127 20:34:24.180750 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-create-kth5t"]
Jan 27 20:34:25 crc kubenswrapper[4793]: I0127 20:34:25.810129 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"
Jan 27 20:34:25 crc kubenswrapper[4793]: E0127 20:34:25.810732 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:34:25 crc kubenswrapper[4793]: I0127 20:34:25.815334 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29c672ac-584e-4777-8c08-4f78c6286686" path="/var/lib/kubelet/pods/29c672ac-584e-4777-8c08-4f78c6286686/volumes"
Jan 27 20:34:25 crc kubenswrapper[4793]: I0127 20:34:25.816211 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9047e5d3-ba8c-49d3-af56-6f6b3a090759" path="/var/lib/kubelet/pods/9047e5d3-ba8c-49d3-af56-6f6b3a090759/volumes"
Jan 27 20:34:25 crc kubenswrapper[4793]: I0127 20:34:25.817174 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d33de24-4f5c-4cff-8da3-7848753edd2a" path="/var/lib/kubelet/pods/9d33de24-4f5c-4cff-8da3-7848753edd2a/volumes"
Jan 27 20:34:25 crc kubenswrapper[4793]: I0127 20:34:25.818052 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a267a0de-2ca0-4324-a99d-faf15e41e8ff" path="/var/lib/kubelet/pods/a267a0de-2ca0-4324-a99d-faf15e41e8ff/volumes"
Jan 27 20:34:25 crc kubenswrapper[4793]: I0127 20:34:25.819896 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0ab0f0f-0759-461c-b18e-ca19b1a627e0" path="/var/lib/kubelet/pods/b0ab0f0f-0759-461c-b18e-ca19b1a627e0/volumes"
Jan 27 20:34:25 crc kubenswrapper[4793]: I0127 20:34:25.820678 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f847ba7a-9d1b-4fbd-8ca3-7d64813b628a" path="/var/lib/kubelet/pods/f847ba7a-9d1b-4fbd-8ca3-7d64813b628a/volumes"
Jan 27 20:34:29 crc kubenswrapper[4793]: I0127 20:34:29.237163 4793 scope.go:117] "RemoveContainer" containerID="5ad6a7efcba56f4aa977f41c194f05638235e97d479801b2ec91f8889f0635ed"
Jan 27 20:34:29 crc kubenswrapper[4793]: I0127 20:34:29.279049 4793 scope.go:117] "RemoveContainer" containerID="63064aa02c2270eb030fc2d7958b8f3befa944fe2ea1cd70f86590ad461d57e2"
Jan 27 20:34:29 crc kubenswrapper[4793]: I0127 20:34:29.319836 4793 scope.go:117] "RemoveContainer" containerID="29c4c37cfbf9cb56c22b7aa90d7b916d9fdc12a02810ff12030a18987dd0a2b7"
Jan 27 20:34:29 crc kubenswrapper[4793]: I0127 20:34:29.373998 4793 scope.go:117] "RemoveContainer" containerID="0a3b4d61631f60c9a06084897dc2e6a6dd357c966ba1b3d2a070ac08424b0bfc"
Jan 27 20:34:29 crc kubenswrapper[4793]: I0127 20:34:29.440280 4793 scope.go:117] "RemoveContainer" containerID="0649ee374bd61944b4796eba42b0518fb013cd0f75d9c150853e874c7711218b"
Jan 27 20:34:29 crc kubenswrapper[4793]: I0127 20:34:29.497914 4793 scope.go:117] "RemoveContainer" containerID="4c6bf41d808408660b902a7302b5c82971454991eefa0fddb7c0b2c0868b932d"
Jan 27 20:34:29 crc kubenswrapper[4793]: I0127 20:34:29.563776 4793 scope.go:117] "RemoveContainer" containerID="1ba26e21b66fbc867cee1e1c13aa2b549d0cb9bbb14b0a1fad66c04f99e56556"
Jan 27 20:34:29 crc kubenswrapper[4793]: I0127 20:34:29.584799 4793 scope.go:117] "RemoveContainer" containerID="57ddea27c26fd277acfc2270877b9d7e587de2fc84465cd194cfc9dbae24d76e"
Jan 27 20:34:29 crc kubenswrapper[4793]: I0127 20:34:29.607023 4793 scope.go:117] "RemoveContainer" containerID="d8474966c2d0acd4b9224731d471424be99dfd680f86a82e43f2662a3a059c1f"
Jan 27 20:34:29 crc kubenswrapper[4793]: I0127 20:34:29.629472 4793 scope.go:117] "RemoveContainer" containerID="a72ad186481bcdbef59b21b3b72cb53534800cd94451703905e859382f8ac3f5"
Jan 27 20:34:29 crc kubenswrapper[4793]: I0127 20:34:29.658727 4793 scope.go:117] "RemoveContainer" containerID="6c02c5b0406fcdb10b2247b96accf11bff0027707f55f2f43b52506ccfe6e2d1"
Jan 27 20:34:33 crc kubenswrapper[4793]: I0127 20:34:33.803786 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:34:33 crc kubenswrapper[4793]: E0127 20:34:33.804501 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:34:40 crc kubenswrapper[4793]: I0127 20:34:40.804110 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"
Jan 27 20:34:40 crc kubenswrapper[4793]: E0127 20:34:40.805121 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:34:47 crc kubenswrapper[4793]: I0127 20:34:47.803189 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:34:47 crc kubenswrapper[4793]: E0127 20:34:47.804083 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:34:51 crc kubenswrapper[4793]: I0127 20:34:51.809430 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"
Jan 27 20:34:51 crc kubenswrapper[4793]: E0127 20:34:51.810127 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:34:59 crc kubenswrapper[4793]: I0127 20:34:59.167079 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-5mzxt"]
Jan 27 20:34:59 crc kubenswrapper[4793]: I0127 20:34:59.181586 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-5mzxt"]
Jan 27 20:34:59 crc kubenswrapper[4793]: I0127 20:34:59.815467 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6e54982-02cd-4003-8bf4-e10dfade4061" path="/var/lib/kubelet/pods/f6e54982-02cd-4003-8bf4-e10dfade4061/volumes"
Jan 27 20:35:00 crc kubenswrapper[4793]: I0127 20:35:00.133287 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-ee28-account-create-update-rdbqh"]
Jan 27 20:35:00 crc kubenswrapper[4793]: I0127 20:35:00.144433 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-ee28-account-create-update-rdbqh"]
Jan 27 20:35:01 crc kubenswrapper[4793]: I0127 20:35:01.817468 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efa4e39e-3cd2-4c39-bc58-b257b5bf95a7" path="/var/lib/kubelet/pods/efa4e39e-3cd2-4c39-bc58-b257b5bf95a7/volumes"
Jan 27 20:35:02 crc kubenswrapper[4793]: I0127 20:35:02.803771 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:35:02 crc kubenswrapper[4793]: E0127 20:35:02.804128 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:35:04 crc kubenswrapper[4793]: I0127 20:35:04.805117 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"
Jan 27 20:35:04 crc kubenswrapper[4793]: E0127 20:35:04.805650 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:35:10 crc kubenswrapper[4793]: I0127 20:35:10.050651 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-58r7m"]
Jan 27 20:35:10 crc kubenswrapper[4793]: I0127 20:35:10.062061 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-58r7m"]
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.087111 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-q7xrq"]
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.096090 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-5058-account-create-update-jrldw"]
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.104778 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-423f-account-create-update-xxmkp"]
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.115252 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-sqcpz"]
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.125914 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-q7xrq"]
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.136198 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-5058-account-create-update-jrldw"]
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.146917 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-423f-account-create-update-xxmkp"]
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.156961 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-sqcpz"]
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.278460 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hccv6"]
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.288680 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hccv6"
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.296825 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hccv6"]
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.369668 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/353cdca2-768d-404f-9c1f-af25dc0192e6-catalog-content\") pod \"redhat-operators-hccv6\" (UID: \"353cdca2-768d-404f-9c1f-af25dc0192e6\") " pod="openshift-marketplace/redhat-operators-hccv6"
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.369744 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvxj8\" (UniqueName: \"kubernetes.io/projected/353cdca2-768d-404f-9c1f-af25dc0192e6-kube-api-access-kvxj8\") pod \"redhat-operators-hccv6\" (UID: \"353cdca2-768d-404f-9c1f-af25dc0192e6\") " pod="openshift-marketplace/redhat-operators-hccv6"
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.369793 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/353cdca2-768d-404f-9c1f-af25dc0192e6-utilities\") pod \"redhat-operators-hccv6\" (UID: \"353cdca2-768d-404f-9c1f-af25dc0192e6\") " pod="openshift-marketplace/redhat-operators-hccv6"
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.470979 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/353cdca2-768d-404f-9c1f-af25dc0192e6-utilities\") pod \"redhat-operators-hccv6\" (UID: \"353cdca2-768d-404f-9c1f-af25dc0192e6\") " pod="openshift-marketplace/redhat-operators-hccv6"
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.471213 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/353cdca2-768d-404f-9c1f-af25dc0192e6-catalog-content\") pod \"redhat-operators-hccv6\" (UID: \"353cdca2-768d-404f-9c1f-af25dc0192e6\") " pod="openshift-marketplace/redhat-operators-hccv6"
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.471280 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvxj8\" (UniqueName: \"kubernetes.io/projected/353cdca2-768d-404f-9c1f-af25dc0192e6-kube-api-access-kvxj8\") pod \"redhat-operators-hccv6\" (UID: \"353cdca2-768d-404f-9c1f-af25dc0192e6\") " pod="openshift-marketplace/redhat-operators-hccv6"
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.471704 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/353cdca2-768d-404f-9c1f-af25dc0192e6-utilities\") pod \"redhat-operators-hccv6\" (UID: \"353cdca2-768d-404f-9c1f-af25dc0192e6\") " pod="openshift-marketplace/redhat-operators-hccv6"
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.471768 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/353cdca2-768d-404f-9c1f-af25dc0192e6-catalog-content\") pod \"redhat-operators-hccv6\" (UID: \"353cdca2-768d-404f-9c1f-af25dc0192e6\") " pod="openshift-marketplace/redhat-operators-hccv6"
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.507376 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvxj8\" (UniqueName: \"kubernetes.io/projected/353cdca2-768d-404f-9c1f-af25dc0192e6-kube-api-access-kvxj8\") pod \"redhat-operators-hccv6\" (UID: \"353cdca2-768d-404f-9c1f-af25dc0192e6\") " pod="openshift-marketplace/redhat-operators-hccv6"
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.624471 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hccv6"
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.830105 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3093a815-0d7f-4490-96cb-87cb11a1eb4a" path="/var/lib/kubelet/pods/3093a815-0d7f-4490-96cb-87cb11a1eb4a/volumes"
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.831218 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c6c821-fd44-417d-9352-8f0a3443c80c" path="/var/lib/kubelet/pods/49c6c821-fd44-417d-9352-8f0a3443c80c/volumes"
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.832686 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7db08350-41e5-47fc-912f-d2a00aef5fc6" path="/var/lib/kubelet/pods/7db08350-41e5-47fc-912f-d2a00aef5fc6/volumes"
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.833412 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94a9e18d-b12c-4edd-b7d5-0e976341ab95" path="/var/lib/kubelet/pods/94a9e18d-b12c-4edd-b7d5-0e976341ab95/volumes"
Jan 27 20:35:11 crc kubenswrapper[4793]: I0127 20:35:11.836221 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9abb695-6b0a-423e-bccc-a6910c0cafc5" path="/var/lib/kubelet/pods/e9abb695-6b0a-423e-bccc-a6910c0cafc5/volumes"
Jan 27 20:35:12 crc kubenswrapper[4793]: I0127 20:35:12.174202 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hccv6"]
Jan 27 20:35:12 crc kubenswrapper[4793]: I0127 20:35:12.500309 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hccv6" event={"ID":"353cdca2-768d-404f-9c1f-af25dc0192e6","Type":"ContainerStarted","Data":"286acb07169a1a342d9d65d29742e9727f14395d0dc07324a6d6af5b0fe94dea"}
Jan 27 20:35:13 crc kubenswrapper[4793]: I0127 20:35:13.513280 4793 generic.go:334] "Generic (PLEG): container finished" podID="353cdca2-768d-404f-9c1f-af25dc0192e6" containerID="ca1bc992005fd33df6253cb03c1bb2d3bd697f274a96a43af252066e2dccea22" exitCode=0
Jan 27 20:35:13 crc kubenswrapper[4793]: I0127 20:35:13.513438 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hccv6" event={"ID":"353cdca2-768d-404f-9c1f-af25dc0192e6","Type":"ContainerDied","Data":"ca1bc992005fd33df6253cb03c1bb2d3bd697f274a96a43af252066e2dccea22"}
Jan 27 20:35:13 crc kubenswrapper[4793]: I0127 20:35:13.515874 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 27 20:35:14 crc kubenswrapper[4793]: I0127 20:35:14.527301 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hccv6" event={"ID":"353cdca2-768d-404f-9c1f-af25dc0192e6","Type":"ContainerStarted","Data":"3c69527bfcb97b4ea66f852d0534e19b25ab74da8b7e144fd0481001a1904585"}
Jan 27 20:35:14 crc kubenswrapper[4793]: I0127 20:35:14.804263 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:35:14 crc kubenswrapper[4793]: E0127 20:35:14.804672 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:35:16 crc kubenswrapper[4793]: I0127 20:35:16.227358 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-ac6d-account-create-update-fd9n2"]
Jan 27 20:35:16 crc kubenswrapper[4793]: I0127 20:35:16.237846 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-ac6d-account-create-update-fd9n2"]
Jan 27 20:35:16 crc kubenswrapper[4793]: I0127 20:35:16.464404 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-d9tpt"]
Jan 27 20:35:16 crc kubenswrapper[4793]: I0127 20:35:16.466557 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d9tpt"
Jan 27 20:35:16 crc kubenswrapper[4793]: I0127 20:35:16.479008 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d9tpt"]
Jan 27 20:35:16 crc kubenswrapper[4793]: I0127 20:35:16.570135 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/728d4dca-4d94-49ea-b700-a0e80901add0-utilities\") pod \"redhat-marketplace-d9tpt\" (UID: \"728d4dca-4d94-49ea-b700-a0e80901add0\") " pod="openshift-marketplace/redhat-marketplace-d9tpt"
Jan 27 20:35:16 crc kubenswrapper[4793]: I0127 20:35:16.570259 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m42jj\" (UniqueName: \"kubernetes.io/projected/728d4dca-4d94-49ea-b700-a0e80901add0-kube-api-access-m42jj\") pod \"redhat-marketplace-d9tpt\" (UID: \"728d4dca-4d94-49ea-b700-a0e80901add0\") " pod="openshift-marketplace/redhat-marketplace-d9tpt"
Jan 27 20:35:16 crc kubenswrapper[4793]: I0127 20:35:16.570427 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/728d4dca-4d94-49ea-b700-a0e80901add0-catalog-content\") pod \"redhat-marketplace-d9tpt\" (UID: \"728d4dca-4d94-49ea-b700-a0e80901add0\") " pod="openshift-marketplace/redhat-marketplace-d9tpt"
Jan 27 20:35:16 crc kubenswrapper[4793]: I0127 20:35:16.672837 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m42jj\" (UniqueName: \"kubernetes.io/projected/728d4dca-4d94-49ea-b700-a0e80901add0-kube-api-access-m42jj\") pod \"redhat-marketplace-d9tpt\" (UID: \"728d4dca-4d94-49ea-b700-a0e80901add0\") " pod="openshift-marketplace/redhat-marketplace-d9tpt"
Jan 27 20:35:16 crc kubenswrapper[4793]: I0127 20:35:16.673000 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/728d4dca-4d94-49ea-b700-a0e80901add0-catalog-content\") pod \"redhat-marketplace-d9tpt\" (UID: \"728d4dca-4d94-49ea-b700-a0e80901add0\") " pod="openshift-marketplace/redhat-marketplace-d9tpt"
Jan 27 20:35:16 crc kubenswrapper[4793]: I0127 20:35:16.673071 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/728d4dca-4d94-49ea-b700-a0e80901add0-utilities\") pod \"redhat-marketplace-d9tpt\" (UID: \"728d4dca-4d94-49ea-b700-a0e80901add0\") " pod="openshift-marketplace/redhat-marketplace-d9tpt"
Jan 27 20:35:16 crc kubenswrapper[4793]: I0127 20:35:16.673636 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/728d4dca-4d94-49ea-b700-a0e80901add0-utilities\") pod \"redhat-marketplace-d9tpt\" (UID: \"728d4dca-4d94-49ea-b700-a0e80901add0\") " pod="openshift-marketplace/redhat-marketplace-d9tpt"
Jan 27 20:35:16 crc kubenswrapper[4793]: I0127 20:35:16.673686 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/728d4dca-4d94-49ea-b700-a0e80901add0-catalog-content\") pod \"redhat-marketplace-d9tpt\" (UID: \"728d4dca-4d94-49ea-b700-a0e80901add0\") " pod="openshift-marketplace/redhat-marketplace-d9tpt"
Jan 27 20:35:16 crc kubenswrapper[4793]: I0127 20:35:16.691829 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m42jj\" (UniqueName: \"kubernetes.io/projected/728d4dca-4d94-49ea-b700-a0e80901add0-kube-api-access-m42jj\") pod \"redhat-marketplace-d9tpt\" (UID: \"728d4dca-4d94-49ea-b700-a0e80901add0\") " pod="openshift-marketplace/redhat-marketplace-d9tpt"
Jan 27 20:35:16 crc kubenswrapper[4793]: I0127 20:35:16.868104 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d9tpt"
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.074184 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2z9hn"]
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.084705 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2z9hn"
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.120789 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2z9hn"]
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.178448 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b-catalog-content\") pod \"certified-operators-2z9hn\" (UID: \"7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b\") " pod="openshift-marketplace/certified-operators-2z9hn"
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.178595 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b-utilities\") pod \"certified-operators-2z9hn\" (UID: \"7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b\") " pod="openshift-marketplace/certified-operators-2z9hn"
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.178628 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztth5\" (UniqueName: \"kubernetes.io/projected/7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b-kube-api-access-ztth5\") pod \"certified-operators-2z9hn\" (UID: \"7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b\") " pod="openshift-marketplace/certified-operators-2z9hn"
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.279877 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b-catalog-content\") pod \"certified-operators-2z9hn\" (UID: \"7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b\") " pod="openshift-marketplace/certified-operators-2z9hn"
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.279991 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b-utilities\") pod \"certified-operators-2z9hn\" (UID: \"7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b\") " pod="openshift-marketplace/certified-operators-2z9hn"
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.280012 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztth5\" (UniqueName: \"kubernetes.io/projected/7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b-kube-api-access-ztth5\") pod \"certified-operators-2z9hn\" (UID: \"7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b\") " pod="openshift-marketplace/certified-operators-2z9hn"
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.280524 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b-catalog-content\") pod \"certified-operators-2z9hn\" (UID: \"7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b\") " pod="openshift-marketplace/certified-operators-2z9hn"
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.280585 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b-utilities\") pod \"certified-operators-2z9hn\" (UID: \"7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b\") " pod="openshift-marketplace/certified-operators-2z9hn"
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.300410 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztth5\" (UniqueName: \"kubernetes.io/projected/7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b-kube-api-access-ztth5\") pod \"certified-operators-2z9hn\" (UID: \"7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b\") " pod="openshift-marketplace/certified-operators-2z9hn"
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.391727 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d9tpt"]
Jan 27 20:35:17 crc kubenswrapper[4793]: W0127 20:35:17.404805 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod728d4dca_4d94_49ea_b700_a0e80901add0.slice/crio-ddad0d948a1fc72549edde3d60a22ee47e8b359d178e8c6390a08403a4d50885 WatchSource:0}: Error finding container ddad0d948a1fc72549edde3d60a22ee47e8b359d178e8c6390a08403a4d50885: Status 404 returned error can't find the container with id ddad0d948a1fc72549edde3d60a22ee47e8b359d178e8c6390a08403a4d50885
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.418845 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2z9hn"
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.561447 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d9tpt" event={"ID":"728d4dca-4d94-49ea-b700-a0e80901add0","Type":"ContainerStarted","Data":"ddad0d948a1fc72549edde3d60a22ee47e8b359d178e8c6390a08403a4d50885"}
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.567360 4793 generic.go:334] "Generic (PLEG): container finished" podID="353cdca2-768d-404f-9c1f-af25dc0192e6" containerID="3c69527bfcb97b4ea66f852d0534e19b25ab74da8b7e144fd0481001a1904585" exitCode=0
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.567406 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hccv6" event={"ID":"353cdca2-768d-404f-9c1f-af25dc0192e6","Type":"ContainerDied","Data":"3c69527bfcb97b4ea66f852d0534e19b25ab74da8b7e144fd0481001a1904585"}
Jan 27 20:35:17 crc kubenswrapper[4793]: I0127 20:35:17.981821 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"
Jan 27 20:35:17 crc kubenswrapper[4793]: E0127 20:35:17.982091 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:35:18 crc kubenswrapper[4793]: I0127 20:35:18.051485 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d" path="/var/lib/kubelet/pods/074b8f4c-0ac5-4a1b-8d9e-e7491dd9758d/volumes"
Jan 27 20:35:18 crc kubenswrapper[4793]: I0127 20:35:18.261818 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2z9hn"]
Jan 27 20:35:18 crc kubenswrapper[4793]: W0127 20:35:18.403919 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7cc048be_c6e0_44b2_aed1_e1f20a5cbb8b.slice/crio-dae9f96e22324ef14225678dda288a7bbf0a27ea473c44a8064b943a2f728193 WatchSource:0}: Error finding container dae9f96e22324ef14225678dda288a7bbf0a27ea473c44a8064b943a2f728193: Status 404 returned error can't find the container with id dae9f96e22324ef14225678dda288a7bbf0a27ea473c44a8064b943a2f728193
Jan 27 20:35:18 crc kubenswrapper[4793]: I0127 20:35:18.580509 4793 generic.go:334] "Generic (PLEG): container finished" podID="728d4dca-4d94-49ea-b700-a0e80901add0" containerID="d174ece0c10fc7f7dd26a0da8e227b8449f2141c7a0cdcb8d663a3b043e42d79" exitCode=0
Jan 27 20:35:18 crc kubenswrapper[4793]: I0127 20:35:18.580572 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d9tpt" event={"ID":"728d4dca-4d94-49ea-b700-a0e80901add0","Type":"ContainerDied","Data":"d174ece0c10fc7f7dd26a0da8e227b8449f2141c7a0cdcb8d663a3b043e42d79"}
Jan 27 20:35:18 crc kubenswrapper[4793]: I0127 20:35:18.586114 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2z9hn" event={"ID":"7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b","Type":"ContainerStarted","Data":"dae9f96e22324ef14225678dda288a7bbf0a27ea473c44a8064b943a2f728193"}
Jan 27 20:35:19 crc kubenswrapper[4793]: I0127 20:35:19.753502 4793 generic.go:334] "Generic (PLEG): container finished" podID="7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b" containerID="50db1c59472222dbb3cc5ea318d92314c5b1114a04dee89b69d68b0a7aa862ee" exitCode=0
Jan 27 20:35:19 crc kubenswrapper[4793]: I0127 20:35:19.754205 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2z9hn" event={"ID":"7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b","Type":"ContainerDied","Data":"50db1c59472222dbb3cc5ea318d92314c5b1114a04dee89b69d68b0a7aa862ee"}
Jan 27 20:35:19 crc kubenswrapper[4793]: I0127 20:35:19.765936 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d9tpt" event={"ID":"728d4dca-4d94-49ea-b700-a0e80901add0","Type":"ContainerStarted","Data":"381aabad282a7c6841da35f20f6ebedaa0df3710c5cc2359eec35254f2d12b24"}
Jan 27 20:35:19 crc kubenswrapper[4793]: I0127 20:35:19.774569 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hccv6" event={"ID":"353cdca2-768d-404f-9c1f-af25dc0192e6","Type":"ContainerStarted","Data":"58f410c1e57864e81a5ea2cefa02fc0b11aeb2bf729183f34b193c68950afff9"}
Jan 27 20:35:19 crc kubenswrapper[4793]: I0127 20:35:19.812480 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hccv6" podStartSLOduration=3.744021863 podStartE2EDuration="8.812454323s" podCreationTimestamp="2026-01-27 20:35:11 +0000 UTC" firstStartedPulling="2026-01-27 20:35:13.515518867 +0000 UTC m=+1938.905772023" lastFinishedPulling="2026-01-27 20:35:18.583951327 +0000 UTC m=+1943.974204483" observedRunningTime="2026-01-27 20:35:19.802867204 +0000 UTC m=+1945.193120360" watchObservedRunningTime="2026-01-27 20:35:19.812454323 +0000 UTC m=+1945.202707499"
Jan 27 20:35:20 crc kubenswrapper[4793]: I0127 20:35:20.787489 4793 generic.go:334] "Generic (PLEG): container finished" podID="728d4dca-4d94-49ea-b700-a0e80901add0" containerID="381aabad282a7c6841da35f20f6ebedaa0df3710c5cc2359eec35254f2d12b24" exitCode=0
Jan 27 20:35:20 crc kubenswrapper[4793]: I0127 20:35:20.787578 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d9tpt" event={"ID":"728d4dca-4d94-49ea-b700-a0e80901add0","Type":"ContainerDied","Data":"381aabad282a7c6841da35f20f6ebedaa0df3710c5cc2359eec35254f2d12b24"}
Jan 27 20:35:21 crc kubenswrapper[4793]: I0127 20:35:21.626011 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hccv6"
Jan 27 20:35:21 crc kubenswrapper[4793]: I0127 20:35:21.626108 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hccv6"
Jan 27 20:35:21 crc kubenswrapper[4793]: I0127 20:35:21.799987 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d9tpt" event={"ID":"728d4dca-4d94-49ea-b700-a0e80901add0","Type":"ContainerStarted","Data":"c51638b8f4e00b07d39c8a7e274738711ad766abe556623a343fec281c01cc1a"}
Jan 27 20:35:21 crc kubenswrapper[4793]: I0127 20:35:21.827530 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-d9tpt" podStartSLOduration=2.943538249 podStartE2EDuration="5.82750907s" podCreationTimestamp="2026-01-27 20:35:16 +0000 UTC" firstStartedPulling="2026-01-27 20:35:18.582026788 +0000 UTC m=+1943.972279944" lastFinishedPulling="2026-01-27 20:35:21.465997609 +0000 UTC m=+1946.856250765" observedRunningTime="2026-01-27 20:35:21.822391083 +0000 UTC m=+1947.212644249" watchObservedRunningTime="2026-01-27 20:35:21.82750907 +0000 UTC m=+1947.217762226"
Jan 27 20:35:22 crc kubenswrapper[4793]: I0127 20:35:22.686976 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hccv6" podUID="353cdca2-768d-404f-9c1f-af25dc0192e6" containerName="registry-server" probeResult="failure" output=<
Jan 27 20:35:22 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s
Jan 27 20:35:22 crc kubenswrapper[4793]: >
Jan 27 20:35:25 crc kubenswrapper[4793]: I0127 20:35:25.857387 4793 generic.go:334] "Generic (PLEG): container finished" podID="7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b" containerID="e48a639fbd3a0f55fffe6bdba5d8017616a816d07a1d247f3311e4f0305eb35b" exitCode=0
Jan 27 20:35:25 crc kubenswrapper[4793]: I0127 20:35:25.857477 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2z9hn" event={"ID":"7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b","Type":"ContainerDied","Data":"e48a639fbd3a0f55fffe6bdba5d8017616a816d07a1d247f3311e4f0305eb35b"}
Jan 27 20:35:26 crc kubenswrapper[4793]: I0127 20:35:26.868404 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-d9tpt"
Jan 27 20:35:26 crc kubenswrapper[4793]: I0127 20:35:26.869969 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2z9hn" event={"ID":"7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b","Type":"ContainerStarted","Data":"b292a954b8ce86b06ce69561fb2992884063d54afea611494ae9ef8a2c62976d"}
Jan 27 20:35:26 crc kubenswrapper[4793]: I0127 20:35:26.870132 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-d9tpt"
Jan 27 20:35:26 crc kubenswrapper[4793]: I0127 20:35:26.900933 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2z9hn" podStartSLOduration=3.396970448 podStartE2EDuration="9.900909137s" podCreationTimestamp="2026-01-27 20:35:17 +0000 UTC" firstStartedPulling="2026-01-27 20:35:19.75999262 +0000 UTC m=+1945.150245776" lastFinishedPulling="2026-01-27 20:35:26.263931309 +0000 UTC m=+1951.654184465" observedRunningTime="2026-01-27 20:35:26.889293099 +0000 UTC m=+1952.279546285" watchObservedRunningTime="2026-01-27 20:35:26.900909137 +0000 UTC m=+1952.291162283"
Jan 27 20:35:26 crc kubenswrapper[4793]: I0127 20:35:26.919623 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-d9tpt"
Jan 27 20:35:27 crc kubenswrapper[4793]: I0127 20:35:27.459572 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2z9hn"
Jan 27 20:35:27 crc kubenswrapper[4793]: I0127 20:35:27.459656 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2z9hn"
Jan 27 20:35:27 crc kubenswrapper[4793]: I0127 20:35:27.803163 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3"
Jan 27 20:35:27 crc kubenswrapper[4793]: E0127 20:35:27.803518 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:35:27 crc kubenswrapper[4793]: I0127 20:35:27.932804 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-d9tpt"
Jan 27 20:35:28 crc kubenswrapper[4793]: I0127 20:35:28.452523 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d9tpt"]
Jan 27 20:35:28 crc kubenswrapper[4793]: I0127 20:35:28.511531 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-2z9hn" podUID="7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b" containerName="registry-server" probeResult="failure" output=<
Jan 27 20:35:28 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s
Jan 27 20:35:28 crc kubenswrapper[4793]: >
Jan 27 20:35:29 crc kubenswrapper[4793]: I0127 20:35:29.851324 4793 scope.go:117] "RemoveContainer" containerID="b13ae1e4db9ffa87d15ad9cff548164273cd413600e684bb90e9e745d6b22725"
Jan 27 20:35:29 crc kubenswrapper[4793]: I0127 20:35:29.887093 4793 scope.go:117] "RemoveContainer" containerID="e69b45fbb1ba4b545adc21dce64abe8295dd66bf43f67384b51bc79a3ebc1d01"
Jan 27 20:35:29 crc kubenswrapper[4793]: I0127 20:35:29.900505 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-d9tpt" podUID="728d4dca-4d94-49ea-b700-a0e80901add0" containerName="registry-server" containerID="cri-o://c51638b8f4e00b07d39c8a7e274738711ad766abe556623a343fec281c01cc1a" gracePeriod=2
Jan 27 20:35:29 crc kubenswrapper[4793]: I0127 20:35:29.961971 4793 scope.go:117] "RemoveContainer" containerID="756ef12bab39ddd06e8c9afefa8cc304473802c6b77c88cffc79bbf743d899fa"
Jan 27 20:35:30 crc kubenswrapper[4793]: I0127 20:35:30.099430 4793 scope.go:117] "RemoveContainer" containerID="752aff503dc687269961023c25f7b1d7d9241bfd3057ee6adef0215de8df6e29"
Jan 27 20:35:30 crc kubenswrapper[4793]: I0127 20:35:30.141765 4793 scope.go:117] "RemoveContainer" containerID="6870cc92f73decd303d4bf2a0f0b1c78ca50b0f9abdd6b0a6b39b661d987fb3e"
Jan 27 20:35:30 crc kubenswrapper[4793]: I0127 20:35:30.167069 4793 scope.go:117] "RemoveContainer" containerID="2b480f404d36e6496570497fb774c548ea2f902fba91032176a35eaa1224d025"
Jan 27 20:35:30 crc kubenswrapper[4793]: I0127 20:35:30.231218 4793 scope.go:117] "RemoveContainer" containerID="95bf2cf70043fc83d341b65f0b8311c6a7cb175fdebc8e3c7e7a8c45e924a3c4"
Jan 27 20:35:30 crc kubenswrapper[4793]: I0127 20:35:30.256782 4793 scope.go:117] "RemoveContainer" containerID="e54c839c21db618b9435b0607c371ed564b02bbcb545995ebe577d81cb20d308"
Jan 27 20:35:30 crc kubenswrapper[4793]: I0127 20:35:30.277963 4793 scope.go:117] "RemoveContainer" containerID="076738602b49bc88fc9f86f069a04488a88a980bbbaf538c27c2cddee88d4eeb"
Jan 27 20:35:30 crc kubenswrapper[4793]: I0127 20:35:30.300811 4793 scope.go:117] "RemoveContainer" containerID="aba883d1a79edfa666fd45bf1e958b02f02ba7a54b880845ba4c2dfa9d2b9c96"
Jan 27 20:35:30 crc kubenswrapper[4793]: I0127 20:35:30.420177 4793 scope.go:117] "RemoveContainer" containerID="37ddeb194d858320bbff390426131055888110731a421183cbcb3424e8dad9c4"
Jan 27 20:35:30 crc kubenswrapper[4793]: I0127 20:35:30.913205 4793 generic.go:334] "Generic (PLEG): container finished" podID="728d4dca-4d94-49ea-b700-a0e80901add0" containerID="c51638b8f4e00b07d39c8a7e274738711ad766abe556623a343fec281c01cc1a" exitCode=0
Jan 27 20:35:30 crc kubenswrapper[4793]: I0127 20:35:30.913281 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d9tpt" event={"ID":"728d4dca-4d94-49ea-b700-a0e80901add0","Type":"ContainerDied","Data":"c51638b8f4e00b07d39c8a7e274738711ad766abe556623a343fec281c01cc1a"}
Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.058396 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d9tpt"
Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.252175 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/728d4dca-4d94-49ea-b700-a0e80901add0-utilities\") pod \"728d4dca-4d94-49ea-b700-a0e80901add0\" (UID: \"728d4dca-4d94-49ea-b700-a0e80901add0\") "
Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.252301 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m42jj\" (UniqueName: \"kubernetes.io/projected/728d4dca-4d94-49ea-b700-a0e80901add0-kube-api-access-m42jj\") pod \"728d4dca-4d94-49ea-b700-a0e80901add0\" (UID: \"728d4dca-4d94-49ea-b700-a0e80901add0\") "
Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.252435 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/728d4dca-4d94-49ea-b700-a0e80901add0-catalog-content\") pod \"728d4dca-4d94-49ea-b700-a0e80901add0\" (UID: \"728d4dca-4d94-49ea-b700-a0e80901add0\") "
Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.253270 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/728d4dca-4d94-49ea-b700-a0e80901add0-utilities" (OuterVolumeSpecName: "utilities") pod "728d4dca-4d94-49ea-b700-a0e80901add0" (UID: "728d4dca-4d94-49ea-b700-a0e80901add0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.265927 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/728d4dca-4d94-49ea-b700-a0e80901add0-kube-api-access-m42jj" (OuterVolumeSpecName: "kube-api-access-m42jj") pod "728d4dca-4d94-49ea-b700-a0e80901add0" (UID: "728d4dca-4d94-49ea-b700-a0e80901add0"). InnerVolumeSpecName "kube-api-access-m42jj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.280668 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/728d4dca-4d94-49ea-b700-a0e80901add0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "728d4dca-4d94-49ea-b700-a0e80901add0" (UID: "728d4dca-4d94-49ea-b700-a0e80901add0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.354972 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/728d4dca-4d94-49ea-b700-a0e80901add0-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.355015 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m42jj\" (UniqueName: \"kubernetes.io/projected/728d4dca-4d94-49ea-b700-a0e80901add0-kube-api-access-m42jj\") on node \"crc\" DevicePath \"\""
Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.355030 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/728d4dca-4d94-49ea-b700-a0e80901add0-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.691336 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hccv6"
Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.740487 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hccv6"
Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.803594 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2"
Jan 27 20:35:31 crc kubenswrapper[4793]: E0127 20:35:31.803920 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.926185 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d9tpt" event={"ID":"728d4dca-4d94-49ea-b700-a0e80901add0","Type":"ContainerDied","Data":"ddad0d948a1fc72549edde3d60a22ee47e8b359d178e8c6390a08403a4d50885"}
Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.926228 4793 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d9tpt" Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.926252 4793 scope.go:117] "RemoveContainer" containerID="c51638b8f4e00b07d39c8a7e274738711ad766abe556623a343fec281c01cc1a" Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.954145 4793 scope.go:117] "RemoveContainer" containerID="381aabad282a7c6841da35f20f6ebedaa0df3710c5cc2359eec35254f2d12b24" Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.956303 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d9tpt"] Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.973981 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-d9tpt"] Jan 27 20:35:31 crc kubenswrapper[4793]: I0127 20:35:31.982238 4793 scope.go:117] "RemoveContainer" containerID="d174ece0c10fc7f7dd26a0da8e227b8449f2141c7a0cdcb8d663a3b043e42d79" Jan 27 20:35:33 crc kubenswrapper[4793]: I0127 20:35:33.816445 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="728d4dca-4d94-49ea-b700-a0e80901add0" path="/var/lib/kubelet/pods/728d4dca-4d94-49ea-b700-a0e80901add0/volumes" Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.058521 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hccv6"] Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.059054 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hccv6" podUID="353cdca2-768d-404f-9c1f-af25dc0192e6" containerName="registry-server" containerID="cri-o://58f410c1e57864e81a5ea2cefa02fc0b11aeb2bf729183f34b193c68950afff9" gracePeriod=2 Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.525101 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hccv6" Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.533167 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvxj8\" (UniqueName: \"kubernetes.io/projected/353cdca2-768d-404f-9c1f-af25dc0192e6-kube-api-access-kvxj8\") pod \"353cdca2-768d-404f-9c1f-af25dc0192e6\" (UID: \"353cdca2-768d-404f-9c1f-af25dc0192e6\") " Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.533332 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/353cdca2-768d-404f-9c1f-af25dc0192e6-utilities\") pod \"353cdca2-768d-404f-9c1f-af25dc0192e6\" (UID: \"353cdca2-768d-404f-9c1f-af25dc0192e6\") " Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.533402 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/353cdca2-768d-404f-9c1f-af25dc0192e6-catalog-content\") pod \"353cdca2-768d-404f-9c1f-af25dc0192e6\" (UID: \"353cdca2-768d-404f-9c1f-af25dc0192e6\") " Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.533914 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/353cdca2-768d-404f-9c1f-af25dc0192e6-utilities" (OuterVolumeSpecName: "utilities") pod "353cdca2-768d-404f-9c1f-af25dc0192e6" (UID: "353cdca2-768d-404f-9c1f-af25dc0192e6"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.538806 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/353cdca2-768d-404f-9c1f-af25dc0192e6-kube-api-access-kvxj8" (OuterVolumeSpecName: "kube-api-access-kvxj8") pod "353cdca2-768d-404f-9c1f-af25dc0192e6" (UID: "353cdca2-768d-404f-9c1f-af25dc0192e6"). InnerVolumeSpecName "kube-api-access-kvxj8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.635170 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/353cdca2-768d-404f-9c1f-af25dc0192e6-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.635220 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kvxj8\" (UniqueName: \"kubernetes.io/projected/353cdca2-768d-404f-9c1f-af25dc0192e6-kube-api-access-kvxj8\") on node \"crc\" DevicePath \"\"" Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.645863 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/353cdca2-768d-404f-9c1f-af25dc0192e6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "353cdca2-768d-404f-9c1f-af25dc0192e6" (UID: "353cdca2-768d-404f-9c1f-af25dc0192e6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.736851 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/353cdca2-768d-404f-9c1f-af25dc0192e6-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.958002 4793 generic.go:334] "Generic (PLEG): container finished" podID="353cdca2-768d-404f-9c1f-af25dc0192e6" containerID="58f410c1e57864e81a5ea2cefa02fc0b11aeb2bf729183f34b193c68950afff9" exitCode=0 Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.958100 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hccv6" Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.958132 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hccv6" event={"ID":"353cdca2-768d-404f-9c1f-af25dc0192e6","Type":"ContainerDied","Data":"58f410c1e57864e81a5ea2cefa02fc0b11aeb2bf729183f34b193c68950afff9"} Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.958412 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hccv6" event={"ID":"353cdca2-768d-404f-9c1f-af25dc0192e6","Type":"ContainerDied","Data":"286acb07169a1a342d9d65d29742e9727f14395d0dc07324a6d6af5b0fe94dea"} Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.958434 4793 scope.go:117] "RemoveContainer" containerID="58f410c1e57864e81a5ea2cefa02fc0b11aeb2bf729183f34b193c68950afff9" Jan 27 20:35:34 crc kubenswrapper[4793]: I0127 20:35:34.978618 4793 scope.go:117] "RemoveContainer" containerID="3c69527bfcb97b4ea66f852d0534e19b25ab74da8b7e144fd0481001a1904585" Jan 27 20:35:35 crc kubenswrapper[4793]: I0127 20:35:35.002174 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hccv6"] Jan 27 20:35:35 crc kubenswrapper[4793]: I0127 20:35:35.012200 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hccv6"] Jan 27 20:35:35 crc kubenswrapper[4793]: I0127 20:35:35.013590 4793 scope.go:117] "RemoveContainer" containerID="ca1bc992005fd33df6253cb03c1bb2d3bd697f274a96a43af252066e2dccea22" Jan 27 20:35:35 crc kubenswrapper[4793]: I0127 20:35:35.064941 4793 scope.go:117] "RemoveContainer" containerID="58f410c1e57864e81a5ea2cefa02fc0b11aeb2bf729183f34b193c68950afff9" Jan 27 20:35:35 crc kubenswrapper[4793]: E0127 20:35:35.065723 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58f410c1e57864e81a5ea2cefa02fc0b11aeb2bf729183f34b193c68950afff9\": container with ID starting with 58f410c1e57864e81a5ea2cefa02fc0b11aeb2bf729183f34b193c68950afff9 not found: ID does not exist" containerID="58f410c1e57864e81a5ea2cefa02fc0b11aeb2bf729183f34b193c68950afff9" Jan 27 20:35:35 crc kubenswrapper[4793]: I0127 20:35:35.065757 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58f410c1e57864e81a5ea2cefa02fc0b11aeb2bf729183f34b193c68950afff9"} err="failed to get container status \"58f410c1e57864e81a5ea2cefa02fc0b11aeb2bf729183f34b193c68950afff9\": rpc error: code = NotFound desc = could not find container \"58f410c1e57864e81a5ea2cefa02fc0b11aeb2bf729183f34b193c68950afff9\": container with ID starting with 58f410c1e57864e81a5ea2cefa02fc0b11aeb2bf729183f34b193c68950afff9 not found: ID does not exist" Jan 27 20:35:35 crc kubenswrapper[4793]: I0127 20:35:35.065777 4793 scope.go:117] "RemoveContainer" containerID="3c69527bfcb97b4ea66f852d0534e19b25ab74da8b7e144fd0481001a1904585" Jan 27 20:35:35 crc kubenswrapper[4793]: E0127 20:35:35.066049 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c69527bfcb97b4ea66f852d0534e19b25ab74da8b7e144fd0481001a1904585\": container with ID starting with 3c69527bfcb97b4ea66f852d0534e19b25ab74da8b7e144fd0481001a1904585 not found: ID does not exist" containerID="3c69527bfcb97b4ea66f852d0534e19b25ab74da8b7e144fd0481001a1904585" Jan 27 20:35:35 crc kubenswrapper[4793]: I0127 20:35:35.066069 4793 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c69527bfcb97b4ea66f852d0534e19b25ab74da8b7e144fd0481001a1904585"} err="failed to get container status \"3c69527bfcb97b4ea66f852d0534e19b25ab74da8b7e144fd0481001a1904585\": rpc error: code = NotFound desc = could not find container \"3c69527bfcb97b4ea66f852d0534e19b25ab74da8b7e144fd0481001a1904585\": container with ID starting with 3c69527bfcb97b4ea66f852d0534e19b25ab74da8b7e144fd0481001a1904585 not found: ID does not exist" Jan 27 20:35:35 crc kubenswrapper[4793]: I0127 20:35:35.066081 4793 scope.go:117] "RemoveContainer" containerID="ca1bc992005fd33df6253cb03c1bb2d3bd697f274a96a43af252066e2dccea22" Jan 27 20:35:35 crc kubenswrapper[4793]: E0127 20:35:35.066379 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca1bc992005fd33df6253cb03c1bb2d3bd697f274a96a43af252066e2dccea22\": container with ID starting with ca1bc992005fd33df6253cb03c1bb2d3bd697f274a96a43af252066e2dccea22 not found: ID does not exist" containerID="ca1bc992005fd33df6253cb03c1bb2d3bd697f274a96a43af252066e2dccea22" Jan 27 20:35:35 crc kubenswrapper[4793]: I0127 20:35:35.066411 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca1bc992005fd33df6253cb03c1bb2d3bd697f274a96a43af252066e2dccea22"} err="failed to get container status \"ca1bc992005fd33df6253cb03c1bb2d3bd697f274a96a43af252066e2dccea22\": rpc error: code = NotFound desc = could not find container \"ca1bc992005fd33df6253cb03c1bb2d3bd697f274a96a43af252066e2dccea22\": container with ID starting with ca1bc992005fd33df6253cb03c1bb2d3bd697f274a96a43af252066e2dccea22 not found: ID does not exist" Jan 27 20:35:35 crc kubenswrapper[4793]: I0127 20:35:35.815834 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="353cdca2-768d-404f-9c1f-af25dc0192e6" path="/var/lib/kubelet/pods/353cdca2-768d-404f-9c1f-af25dc0192e6/volumes" Jan 27 20:35:37 crc kubenswrapper[4793]: I0127 20:35:37.470205 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2z9hn" Jan 27 20:35:37 crc kubenswrapper[4793]: I0127 20:35:37.521224 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2z9hn" Jan 27 20:35:38 crc kubenswrapper[4793]: I0127 20:35:38.050396 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-tsmxm"] Jan 27 20:35:38 crc kubenswrapper[4793]: I0127 20:35:38.059455 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-tsmxm"] Jan 27 20:35:38 crc kubenswrapper[4793]: I0127 20:35:38.484201 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2z9hn"] Jan 27 20:35:38 crc kubenswrapper[4793]: I0127 20:35:38.664202 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jjsf7"] Jan 27 20:35:38 crc kubenswrapper[4793]: I0127 20:35:38.664512 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jjsf7" podUID="b008ec09-553c-474f-9176-14405d193a65" containerName="registry-server" containerID="cri-o://21aa9d714ae0a321386da7ef447a780192a9faaf6bcb8784c7565f7ab18713b6" gracePeriod=2 Jan 27 20:35:39 crc kubenswrapper[4793]: I0127 20:35:39.012615 4793 generic.go:334] "Generic (PLEG): container finished" 
podID="b008ec09-553c-474f-9176-14405d193a65" containerID="21aa9d714ae0a321386da7ef447a780192a9faaf6bcb8784c7565f7ab18713b6" exitCode=0 Jan 27 20:35:39 crc kubenswrapper[4793]: I0127 20:35:39.013021 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjsf7" event={"ID":"b008ec09-553c-474f-9176-14405d193a65","Type":"ContainerDied","Data":"21aa9d714ae0a321386da7ef447a780192a9faaf6bcb8784c7565f7ab18713b6"} Jan 27 20:35:39 crc kubenswrapper[4793]: I0127 20:35:39.331610 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:35:39 crc kubenswrapper[4793]: I0127 20:35:39.443026 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b008ec09-553c-474f-9176-14405d193a65-catalog-content\") pod \"b008ec09-553c-474f-9176-14405d193a65\" (UID: \"b008ec09-553c-474f-9176-14405d193a65\") " Jan 27 20:35:39 crc kubenswrapper[4793]: I0127 20:35:39.443151 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwqw5\" (UniqueName: \"kubernetes.io/projected/b008ec09-553c-474f-9176-14405d193a65-kube-api-access-mwqw5\") pod \"b008ec09-553c-474f-9176-14405d193a65\" (UID: \"b008ec09-553c-474f-9176-14405d193a65\") " Jan 27 20:35:39 crc kubenswrapper[4793]: I0127 20:35:39.443309 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b008ec09-553c-474f-9176-14405d193a65-utilities\") pod \"b008ec09-553c-474f-9176-14405d193a65\" (UID: \"b008ec09-553c-474f-9176-14405d193a65\") " Jan 27 20:35:39 crc kubenswrapper[4793]: I0127 20:35:39.446264 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b008ec09-553c-474f-9176-14405d193a65-utilities" (OuterVolumeSpecName: "utilities") pod "b008ec09-553c-474f-9176-14405d193a65" (UID: "b008ec09-553c-474f-9176-14405d193a65"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:35:39 crc kubenswrapper[4793]: I0127 20:35:39.456752 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b008ec09-553c-474f-9176-14405d193a65-kube-api-access-mwqw5" (OuterVolumeSpecName: "kube-api-access-mwqw5") pod "b008ec09-553c-474f-9176-14405d193a65" (UID: "b008ec09-553c-474f-9176-14405d193a65"). InnerVolumeSpecName "kube-api-access-mwqw5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:35:39 crc kubenswrapper[4793]: I0127 20:35:39.551812 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwqw5\" (UniqueName: \"kubernetes.io/projected/b008ec09-553c-474f-9176-14405d193a65-kube-api-access-mwqw5\") on node \"crc\" DevicePath \"\"" Jan 27 20:35:39 crc kubenswrapper[4793]: I0127 20:35:39.551847 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b008ec09-553c-474f-9176-14405d193a65-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:35:39 crc kubenswrapper[4793]: I0127 20:35:39.560895 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b008ec09-553c-474f-9176-14405d193a65-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b008ec09-553c-474f-9176-14405d193a65" (UID: "b008ec09-553c-474f-9176-14405d193a65"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:35:39 crc kubenswrapper[4793]: I0127 20:35:39.654763 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b008ec09-553c-474f-9176-14405d193a65-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:35:39 crc kubenswrapper[4793]: I0127 20:35:39.803240 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3" Jan 27 20:35:39 crc kubenswrapper[4793]: E0127 20:35:39.803613 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:35:39 crc kubenswrapper[4793]: I0127 20:35:39.815378 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5" path="/var/lib/kubelet/pods/b48fd5d5-4b6b-47a7-9512-509e2ad7f1f5/volumes" Jan 27 20:35:40 crc kubenswrapper[4793]: I0127 20:35:40.047146 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjsf7" event={"ID":"b008ec09-553c-474f-9176-14405d193a65","Type":"ContainerDied","Data":"bc58c924170fe76240d6c7c1d018e1d96e50b10e74dfc5298a8bf79db1e2223e"} Jan 27 20:35:40 crc kubenswrapper[4793]: I0127 20:35:40.047239 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jjsf7" Jan 27 20:35:40 crc kubenswrapper[4793]: I0127 20:35:40.047615 4793 scope.go:117] "RemoveContainer" containerID="21aa9d714ae0a321386da7ef447a780192a9faaf6bcb8784c7565f7ab18713b6" Jan 27 20:35:40 crc kubenswrapper[4793]: I0127 20:35:40.084901 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jjsf7"] Jan 27 20:35:40 crc kubenswrapper[4793]: I0127 20:35:40.092216 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jjsf7"] Jan 27 20:35:40 crc kubenswrapper[4793]: I0127 20:35:40.094189 4793 scope.go:117] "RemoveContainer" containerID="bb28b38607830e661ec67b365ef3c75d56fba4877068c15f480b81515b3e7cd8" Jan 27 20:35:40 crc kubenswrapper[4793]: I0127 20:35:40.124727 4793 scope.go:117] "RemoveContainer" containerID="5ca24c2da78c21441326a609298e9a3254d53e94d29dfeed39407d64f2b16125" Jan 27 20:35:41 crc kubenswrapper[4793]: I0127 20:35:41.817356 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b008ec09-553c-474f-9176-14405d193a65" path="/var/lib/kubelet/pods/b008ec09-553c-474f-9176-14405d193a65/volumes" Jan 27 20:35:44 crc kubenswrapper[4793]: I0127 20:35:44.803670 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2" Jan 27 20:35:44 crc kubenswrapper[4793]: E0127 20:35:44.803974 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:35:53 crc kubenswrapper[4793]: I0127 
20:35:53.804308 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3" Jan 27 20:35:54 crc kubenswrapper[4793]: I0127 20:35:54.299882 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"edc7c2540f3558d7380d42f09096b31a287517dfb634b17e1816e1dd5a5547a3"} Jan 27 20:35:56 crc kubenswrapper[4793]: I0127 20:35:56.803917 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2" Jan 27 20:35:56 crc kubenswrapper[4793]: E0127 20:35:56.804871 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:36:10 crc kubenswrapper[4793]: I0127 20:36:10.804256 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2" Jan 27 20:36:10 crc kubenswrapper[4793]: E0127 20:36:10.805066 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:36:25 crc kubenswrapper[4793]: I0127 20:36:25.816054 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2" Jan 27 20:36:25 crc kubenswrapper[4793]: E0127 20:36:25.817061 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:36:27 crc kubenswrapper[4793]: I0127 20:36:27.048543 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-sync-mvcq4"] Jan 27 20:36:27 crc kubenswrapper[4793]: I0127 20:36:27.058368 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-sync-mvcq4"] Jan 27 20:36:27 crc kubenswrapper[4793]: I0127 20:36:27.817866 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a01af79-2bf8-40d8-aa3b-5ea2df7b6941" path="/var/lib/kubelet/pods/7a01af79-2bf8-40d8-aa3b-5ea2df7b6941/volumes" Jan 27 20:36:30 crc kubenswrapper[4793]: I0127 20:36:30.724344 4793 scope.go:117] "RemoveContainer" containerID="2c324627880916785a1f5e7c9b641a070bad1fb19c4169dd1d63b48f7fb0d97a" Jan 27 20:36:30 crc kubenswrapper[4793]: I0127 20:36:30.769343 4793 scope.go:117] "RemoveContainer" containerID="630ba15635812f4b701dae61df9462382e9d806e856d29da60cdaa91f6c68caa" Jan 27 20:36:37 crc kubenswrapper[4793]: I0127 20:36:37.046321 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-ldntm"] Jan 27 20:36:37 crc kubenswrapper[4793]: I0127 20:36:37.064223 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-ldntm"] Jan 27 20:36:37 crc kubenswrapper[4793]: I0127 20:36:37.818607 4793 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc67d37b-e18b-47bc-8328-f1f3145f9dc9" path="/var/lib/kubelet/pods/cc67d37b-e18b-47bc-8328-f1f3145f9dc9/volumes" Jan 27 20:36:38 crc kubenswrapper[4793]: I0127 20:36:38.032228 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-dbdlw"] Jan 27 20:36:38 crc kubenswrapper[4793]: I0127 20:36:38.043033 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-dbdlw"] Jan 27 20:36:39 crc kubenswrapper[4793]: I0127 20:36:39.028071 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-gds2t"] Jan 27 20:36:39 crc kubenswrapper[4793]: I0127 20:36:39.035594 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-gds2t"] Jan 27 20:36:39 crc kubenswrapper[4793]: I0127 20:36:39.815141 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03131418-ea5d-47bd-906c-8a93c2712b1c" path="/var/lib/kubelet/pods/03131418-ea5d-47bd-906c-8a93c2712b1c/volumes" Jan 27 20:36:39 crc kubenswrapper[4793]: I0127 20:36:39.816274 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="889b7838-f7be-4969-a167-9ff1b6ce04ef" path="/var/lib/kubelet/pods/889b7838-f7be-4969-a167-9ff1b6ce04ef/volumes" Jan 27 20:36:40 crc kubenswrapper[4793]: I0127 20:36:40.803784 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2" Jan 27 20:36:40 crc kubenswrapper[4793]: E0127 20:36:40.804179 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:36:53 crc kubenswrapper[4793]: I0127 20:36:53.803198 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2" Jan 27 20:36:53 crc kubenswrapper[4793]: E0127 20:36:53.803946 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:37:05 crc kubenswrapper[4793]: I0127 20:37:05.813775 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2" Jan 27 20:37:05 crc kubenswrapper[4793]: E0127 20:37:05.815769 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:37:11 crc kubenswrapper[4793]: I0127 20:37:11.040387 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-wlfpp"] Jan 27 20:37:11 crc kubenswrapper[4793]: I0127 20:37:11.049942 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-wlfpp"] Jan 27 20:37:11 crc kubenswrapper[4793]: I0127 20:37:11.832832 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="64b480ab-f615-4d0b-9b56-ef6d0acf8955" path="/var/lib/kubelet/pods/64b480ab-f615-4d0b-9b56-ef6d0acf8955/volumes" Jan 27 20:37:12 crc kubenswrapper[4793]: I0127 20:37:12.041140 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-cthwx"] Jan 27 20:37:12 crc kubenswrapper[4793]: I0127 20:37:12.051379 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-cthwx"] Jan 27 20:37:13 crc kubenswrapper[4793]: I0127 20:37:13.813699 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ce6efd1-1d02-4e0c-bb44-3e2daac046bd" path="/var/lib/kubelet/pods/4ce6efd1-1d02-4e0c-bb44-3e2daac046bd/volumes" Jan 27 20:37:17 crc kubenswrapper[4793]: I0127 20:37:17.803789 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2" Jan 27 20:37:17 crc kubenswrapper[4793]: E0127 20:37:17.804140 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:37:18 crc kubenswrapper[4793]: I0127 20:37:18.030658 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-cql8q"] Jan 27 20:37:18 crc kubenswrapper[4793]: I0127 20:37:18.040134 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-cql8q"] Jan 27 20:37:19 crc kubenswrapper[4793]: I0127 20:37:19.814656 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a07ca8f7-3387-4f58-a094-26d491028752" path="/var/lib/kubelet/pods/a07ca8f7-3387-4f58-a094-26d491028752/volumes" Jan 27 20:37:30 crc kubenswrapper[4793]: I0127 20:37:30.917704 4793 scope.go:117] "RemoveContainer" containerID="c86439cf944a06eaa2f91514107ad288144f532ea1940bd584201638b93861dd" Jan 27 20:37:31 crc kubenswrapper[4793]: I0127 20:37:31.208675 4793 scope.go:117] "RemoveContainer" containerID="d2475a4da699bc85b2811da31a71ec1292a305fcfd89bf10807ca73a96f9deb7" Jan 27 20:37:31 crc kubenswrapper[4793]: I0127 20:37:31.266150 4793 scope.go:117] "RemoveContainer" containerID="b57ae8118283dabfe483c25d17c0911a8fe523b9a5aa9409b7e40dc7832e2ab8" Jan 27 20:37:31 crc kubenswrapper[4793]: I0127 20:37:31.341905 4793 scope.go:117] "RemoveContainer" containerID="32c84a319d62d23053311a2833bc78d2a80cea5c80329bf5e109babdcb818bb4" Jan 27 20:37:31 crc kubenswrapper[4793]: I0127 20:37:31.382964 4793 scope.go:117] "RemoveContainer" containerID="4ca38b74a3e18d4a88f92129263b6a2af7e1b488760f5e51943c8f3a6dcbe30d" Jan 27 20:37:31 crc kubenswrapper[4793]: I0127 20:37:31.440861 4793 scope.go:117] "RemoveContainer" containerID="ac49f5f979165c4e7a819a2acfcda5cc4faa76cf76e1f1272e99b26b776b3b8b" Jan 27 20:37:31 crc kubenswrapper[4793]: I0127 20:37:31.805092 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2" Jan 27 20:37:31 crc kubenswrapper[4793]: E0127 20:37:31.805662 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:37:45 crc kubenswrapper[4793]: I0127 
20:37:45.811771 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2" Jan 27 20:37:45 crc kubenswrapper[4793]: E0127 20:37:45.812718 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:37:58 crc kubenswrapper[4793]: I0127 20:37:58.803900 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2" Jan 27 20:37:59 crc kubenswrapper[4793]: I0127 20:37:59.197019 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-7g4f4"] Jan 27 20:37:59 crc kubenswrapper[4793]: I0127 20:37:59.209636 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-wfkh5"] Jan 27 20:37:59 crc kubenswrapper[4793]: I0127 20:37:59.223723 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-wfkh5"] Jan 27 20:37:59 crc kubenswrapper[4793]: I0127 20:37:59.232837 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-nmlw2"] Jan 27 20:37:59 crc kubenswrapper[4793]: I0127 20:37:59.241203 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-7g4f4"] Jan 27 20:37:59 crc kubenswrapper[4793]: I0127 20:37:59.250175 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-nmlw2"] Jan 27 20:37:59 crc kubenswrapper[4793]: I0127 20:37:59.817130 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b87e83c5-3906-4d5e-ae19-58ed6148d219" path="/var/lib/kubelet/pods/b87e83c5-3906-4d5e-ae19-58ed6148d219/volumes" Jan 27 20:37:59 crc kubenswrapper[4793]: I0127 20:37:59.818883 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3a94a7d-379f-4d4d-8728-3b1509189b93" path="/var/lib/kubelet/pods/c3a94a7d-379f-4d4d-8728-3b1509189b93/volumes" Jan 27 20:37:59 crc kubenswrapper[4793]: I0127 20:37:59.819973 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cdaa4741-780c-464b-80cc-64eeaf8607de" path="/var/lib/kubelet/pods/cdaa4741-780c-464b-80cc-64eeaf8607de/volumes" Jan 27 20:38:00 crc kubenswrapper[4793]: I0127 20:38:00.096139 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-9878-account-create-update-t2hhx"] Jan 27 20:38:00 crc kubenswrapper[4793]: I0127 20:38:00.107111 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-9878-account-create-update-t2hhx"] Jan 27 20:38:00 crc kubenswrapper[4793]: I0127 20:38:00.117469 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-b226-account-create-update-8gr8q"] Jan 27 20:38:00 crc kubenswrapper[4793]: I0127 20:38:00.131180 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-b226-account-create-update-8gr8q"] Jan 27 20:38:00 crc kubenswrapper[4793]: I0127 20:38:00.279720 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d"} Jan 27 20:38:01 crc kubenswrapper[4793]: I0127 20:38:01.046741 4793 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/nova-api-7b87-account-create-update-qrtvs"] Jan 27 20:38:01 crc kubenswrapper[4793]: I0127 20:38:01.057817 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-7b87-account-create-update-qrtvs"] Jan 27 20:38:01 crc kubenswrapper[4793]: I0127 20:38:01.819324 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58fcdaca-8f5a-4bac-8b2f-754e27d164c0" path="/var/lib/kubelet/pods/58fcdaca-8f5a-4bac-8b2f-754e27d164c0/volumes" Jan 27 20:38:01 crc kubenswrapper[4793]: I0127 20:38:01.821187 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="607b194b-5aaa-4b00-92f1-3448913e04f5" path="/var/lib/kubelet/pods/607b194b-5aaa-4b00-92f1-3448913e04f5/volumes" Jan 27 20:38:01 crc kubenswrapper[4793]: I0127 20:38:01.822165 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f214dfe-47ca-4d9c-804b-1672e923954f" path="/var/lib/kubelet/pods/8f214dfe-47ca-4d9c-804b-1672e923954f/volumes" Jan 27 20:38:02 crc kubenswrapper[4793]: I0127 20:38:02.299786 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" exitCode=1 Jan 27 20:38:02 crc kubenswrapper[4793]: I0127 20:38:02.299836 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d"} Jan 27 20:38:02 crc kubenswrapper[4793]: I0127 20:38:02.299877 4793 scope.go:117] "RemoveContainer" containerID="97d7a490f73efbef544351ad6880294ee89faa57bc52506c67481181e4b96ed2" Jan 27 20:38:02 crc kubenswrapper[4793]: I0127 20:38:02.300939 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:38:02 crc kubenswrapper[4793]: E0127 20:38:02.301332 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:38:03 crc kubenswrapper[4793]: I0127 20:38:03.242970 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 20:38:03 crc kubenswrapper[4793]: I0127 20:38:03.310750 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:38:03 crc kubenswrapper[4793]: E0127 20:38:03.311023 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:38:08 crc kubenswrapper[4793]: I0127 20:38:08.243230 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:38:08 crc kubenswrapper[4793]: I0127 20:38:08.243811 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:38:08 crc kubenswrapper[4793]: I0127 20:38:08.243825 4793 kubelet.go:2542] "SyncLoop (probe)" 
probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:38:08 crc kubenswrapper[4793]: I0127 20:38:08.244681 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:38:08 crc kubenswrapper[4793]: E0127 20:38:08.244969 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:38:08 crc kubenswrapper[4793]: I0127 20:38:08.360820 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:38:08 crc kubenswrapper[4793]: E0127 20:38:08.361109 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:38:22 crc kubenswrapper[4793]: I0127 20:38:22.753800 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:38:22 crc kubenswrapper[4793]: I0127 20:38:22.754402 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:38:22 crc kubenswrapper[4793]: I0127 20:38:22.803978 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:38:22 crc kubenswrapper[4793]: E0127 20:38:22.804274 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:38:31 crc kubenswrapper[4793]: I0127 20:38:31.614690 4793 scope.go:117] "RemoveContainer" containerID="36d7d084b7b3f438480744f9d357cbf378f30ef65bc8f0af760c5c7d8163c381" Jan 27 20:38:31 crc kubenswrapper[4793]: I0127 20:38:31.652173 4793 scope.go:117] "RemoveContainer" containerID="bebbcdb2a19d935441d6ccfb6fc1c2ed85c30df8f41ee92b1a592e2bdd9eb3a9" Jan 27 20:38:31 crc kubenswrapper[4793]: I0127 20:38:31.730019 4793 scope.go:117] "RemoveContainer" containerID="723fc037554f7a2646d72bd56724f57cd2b241c875a3c28b6df0a1e802ef2a8f" Jan 27 20:38:31 crc kubenswrapper[4793]: I0127 20:38:31.814765 4793 scope.go:117] "RemoveContainer" containerID="1b4ed18fe12fbfaf97b29cb2313f8c79eaf86a20544b6f46665edbe435410816" Jan 27 20:38:31 crc kubenswrapper[4793]: I0127 20:38:31.864660 4793 scope.go:117] "RemoveContainer" containerID="9083285707e22bb5f6df9bdd6e46984b96a26404f89026386e8eaea6047f945f" Jan 27 20:38:31 crc kubenswrapper[4793]: I0127 20:38:31.916061 
4793 scope.go:117] "RemoveContainer" containerID="1e9e2fc7de5e6103d89879152339fba2c05a835aa8141018c916389dadb8cf03" Jan 27 20:38:37 crc kubenswrapper[4793]: I0127 20:38:37.803371 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:38:37 crc kubenswrapper[4793]: E0127 20:38:37.804346 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:38:42 crc kubenswrapper[4793]: I0127 20:38:42.046670 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-kdwlw"] Jan 27 20:38:42 crc kubenswrapper[4793]: I0127 20:38:42.055493 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-kdwlw"] Jan 27 20:38:43 crc kubenswrapper[4793]: I0127 20:38:43.813394 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="599d34c5-5606-4125-865c-ff142d5fce8d" path="/var/lib/kubelet/pods/599d34c5-5606-4125-865c-ff142d5fce8d/volumes" Jan 27 20:38:48 crc kubenswrapper[4793]: I0127 20:38:48.804471 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:38:48 crc kubenswrapper[4793]: E0127 20:38:48.805314 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:38:52 crc kubenswrapper[4793]: I0127 20:38:52.754022 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:38:52 crc kubenswrapper[4793]: I0127 20:38:52.754104 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:39:00 crc kubenswrapper[4793]: I0127 20:39:00.803495 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:39:00 crc kubenswrapper[4793]: E0127 20:39:00.804336 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:39:08 crc kubenswrapper[4793]: I0127 20:39:08.057837 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-hszbk"] Jan 27 20:39:08 crc kubenswrapper[4793]: I0127 20:39:08.067764 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/nova-cell0-cell-mapping-hszbk"] Jan 27 20:39:09 crc kubenswrapper[4793]: I0127 20:39:09.823187 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cf3de60-1232-4828-b1a7-77e1f483bfff" path="/var/lib/kubelet/pods/6cf3de60-1232-4828-b1a7-77e1f483bfff/volumes" Jan 27 20:39:11 crc kubenswrapper[4793]: I0127 20:39:11.804831 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:39:11 crc kubenswrapper[4793]: E0127 20:39:11.805575 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:39:20 crc kubenswrapper[4793]: I0127 20:39:20.032047 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-8fwdb"] Jan 27 20:39:20 crc kubenswrapper[4793]: I0127 20:39:20.042508 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-8fwdb"] Jan 27 20:39:21 crc kubenswrapper[4793]: I0127 20:39:21.830496 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5" path="/var/lib/kubelet/pods/5ba52741-6cd6-4a04-9aa3-25a39cd2e6f5/volumes" Jan 27 20:39:22 crc kubenswrapper[4793]: I0127 20:39:22.754039 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:39:22 crc kubenswrapper[4793]: I0127 20:39:22.754383 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:39:22 crc kubenswrapper[4793]: I0127 20:39:22.754464 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:39:22 crc kubenswrapper[4793]: I0127 20:39:22.755342 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"edc7c2540f3558d7380d42f09096b31a287517dfb634b17e1816e1dd5a5547a3"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 20:39:22 crc kubenswrapper[4793]: I0127 20:39:22.755409 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://edc7c2540f3558d7380d42f09096b31a287517dfb634b17e1816e1dd5a5547a3" gracePeriod=600 Jan 27 20:39:23 crc kubenswrapper[4793]: I0127 20:39:23.124967 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="edc7c2540f3558d7380d42f09096b31a287517dfb634b17e1816e1dd5a5547a3" exitCode=0 Jan 27 20:39:23 crc 
kubenswrapper[4793]: I0127 20:39:23.125069 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"edc7c2540f3558d7380d42f09096b31a287517dfb634b17e1816e1dd5a5547a3"} Jan 27 20:39:23 crc kubenswrapper[4793]: I0127 20:39:23.125418 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f"} Jan 27 20:39:23 crc kubenswrapper[4793]: I0127 20:39:23.125447 4793 scope.go:117] "RemoveContainer" containerID="0413fcf1099978e298e7574a6dd3e0a8339a3399b0677756604239707f488fd3" Jan 27 20:39:23 crc kubenswrapper[4793]: I0127 20:39:23.804012 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:39:23 crc kubenswrapper[4793]: E0127 20:39:23.804426 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:39:32 crc kubenswrapper[4793]: I0127 20:39:32.120821 4793 scope.go:117] "RemoveContainer" containerID="3912a4d44e5f4f2e1539bb1f0a7a5ecd887fe2a897d7a2c82a811d6f959032c3" Jan 27 20:39:32 crc kubenswrapper[4793]: I0127 20:39:32.189759 4793 scope.go:117] "RemoveContainer" containerID="c310c8f3cd5f9eccfaa63e08eeaf120a61177eb1d1629a9b4b97725acdc11bcb" Jan 27 20:39:32 crc kubenswrapper[4793]: I0127 20:39:32.261163 4793 scope.go:117] "RemoveContainer" containerID="03a624663d3f8252c079e82c5c545675355c90afff8f16a92e4b98ca0e4ac650" Jan 27 20:39:34 crc kubenswrapper[4793]: I0127 20:39:34.804218 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:39:34 crc kubenswrapper[4793]: E0127 20:39:34.805107 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:39:48 crc kubenswrapper[4793]: I0127 20:39:48.804445 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:39:48 crc kubenswrapper[4793]: E0127 20:39:48.805253 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:39:58 crc kubenswrapper[4793]: I0127 20:39:58.046119 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-pn9t9"] Jan 27 20:39:58 crc kubenswrapper[4793]: I0127 20:39:58.054217 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-pn9t9"] Jan 27 20:39:59 crc kubenswrapper[4793]: I0127 20:39:59.803392 4793 
scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:39:59 crc kubenswrapper[4793]: E0127 20:39:59.803958 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:39:59 crc kubenswrapper[4793]: I0127 20:39:59.816640 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="584f9fed-a572-4b61-8b51-f1178a6cfa76" path="/var/lib/kubelet/pods/584f9fed-a572-4b61-8b51-f1178a6cfa76/volumes" Jan 27 20:40:13 crc kubenswrapper[4793]: I0127 20:40:13.803695 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:40:13 crc kubenswrapper[4793]: E0127 20:40:13.804520 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:40:27 crc kubenswrapper[4793]: I0127 20:40:27.804914 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:40:27 crc kubenswrapper[4793]: E0127 20:40:27.806126 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:40:32 crc kubenswrapper[4793]: I0127 20:40:32.368130 4793 scope.go:117] "RemoveContainer" containerID="57a8180be006032de92a7cc9b237d8a8ded708228f98914c0d60febc5fb8fd7c" Jan 27 20:40:42 crc kubenswrapper[4793]: I0127 20:40:42.803958 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:40:42 crc kubenswrapper[4793]: E0127 20:40:42.804798 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:40:55 crc kubenswrapper[4793]: I0127 20:40:55.816392 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:40:55 crc kubenswrapper[4793]: E0127 20:40:55.817888 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.715915 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hp4x4"] Jan 27 20:41:08 crc kubenswrapper[4793]: E0127 20:41:08.716856 
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.715915 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hp4x4"]
Jan 27 20:41:08 crc kubenswrapper[4793]: E0127 20:41:08.716856 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="353cdca2-768d-404f-9c1f-af25dc0192e6" containerName="registry-server"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.716868 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="353cdca2-768d-404f-9c1f-af25dc0192e6" containerName="registry-server"
Jan 27 20:41:08 crc kubenswrapper[4793]: E0127 20:41:08.716881 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b008ec09-553c-474f-9176-14405d193a65" containerName="extract-content"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.716887 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b008ec09-553c-474f-9176-14405d193a65" containerName="extract-content"
Jan 27 20:41:08 crc kubenswrapper[4793]: E0127 20:41:08.716894 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b008ec09-553c-474f-9176-14405d193a65" containerName="registry-server"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.716901 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b008ec09-553c-474f-9176-14405d193a65" containerName="registry-server"
Jan 27 20:41:08 crc kubenswrapper[4793]: E0127 20:41:08.716946 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="353cdca2-768d-404f-9c1f-af25dc0192e6" containerName="extract-content"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.716954 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="353cdca2-768d-404f-9c1f-af25dc0192e6" containerName="extract-content"
Jan 27 20:41:08 crc kubenswrapper[4793]: E0127 20:41:08.716966 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b008ec09-553c-474f-9176-14405d193a65" containerName="extract-utilities"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.716973 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b008ec09-553c-474f-9176-14405d193a65" containerName="extract-utilities"
Jan 27 20:41:08 crc kubenswrapper[4793]: E0127 20:41:08.716987 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="728d4dca-4d94-49ea-b700-a0e80901add0" containerName="extract-utilities"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.716994 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="728d4dca-4d94-49ea-b700-a0e80901add0" containerName="extract-utilities"
Jan 27 20:41:08 crc kubenswrapper[4793]: E0127 20:41:08.717017 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="353cdca2-768d-404f-9c1f-af25dc0192e6" containerName="extract-utilities"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.717024 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="353cdca2-768d-404f-9c1f-af25dc0192e6" containerName="extract-utilities"
Jan 27 20:41:08 crc kubenswrapper[4793]: E0127 20:41:08.717041 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="728d4dca-4d94-49ea-b700-a0e80901add0" containerName="registry-server"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.717046 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="728d4dca-4d94-49ea-b700-a0e80901add0" containerName="registry-server"
Jan 27 20:41:08 crc kubenswrapper[4793]: E0127 20:41:08.717059 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="728d4dca-4d94-49ea-b700-a0e80901add0" containerName="extract-content"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.717065 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="728d4dca-4d94-49ea-b700-a0e80901add0" containerName="extract-content"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.717243 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="b008ec09-553c-474f-9176-14405d193a65" containerName="registry-server"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.717274 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="353cdca2-768d-404f-9c1f-af25dc0192e6" containerName="registry-server"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.717288 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="728d4dca-4d94-49ea-b700-a0e80901add0" containerName="registry-server"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.719515 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hp4x4"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.730635 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hp4x4"]
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.803283 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d"
Jan 27 20:41:08 crc kubenswrapper[4793]: E0127 20:41:08.803630 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.921722 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed2828e1-80b5-4e29-8980-c1b177608222-utilities\") pod \"community-operators-hp4x4\" (UID: \"ed2828e1-80b5-4e29-8980-c1b177608222\") " pod="openshift-marketplace/community-operators-hp4x4"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.921869 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed2828e1-80b5-4e29-8980-c1b177608222-catalog-content\") pod \"community-operators-hp4x4\" (UID: \"ed2828e1-80b5-4e29-8980-c1b177608222\") " pod="openshift-marketplace/community-operators-hp4x4"
Jan 27 20:41:08 crc kubenswrapper[4793]: I0127 20:41:08.921939 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brrqd\" (UniqueName: \"kubernetes.io/projected/ed2828e1-80b5-4e29-8980-c1b177608222-kube-api-access-brrqd\") pod \"community-operators-hp4x4\" (UID: \"ed2828e1-80b5-4e29-8980-c1b177608222\") " pod="openshift-marketplace/community-operators-hp4x4"
Jan 27 20:41:09 crc kubenswrapper[4793]: I0127 20:41:09.023398 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed2828e1-80b5-4e29-8980-c1b177608222-utilities\") pod \"community-operators-hp4x4\" (UID: \"ed2828e1-80b5-4e29-8980-c1b177608222\") " pod="openshift-marketplace/community-operators-hp4x4"
Jan 27 20:41:09 crc kubenswrapper[4793]: I0127 20:41:09.023855 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed2828e1-80b5-4e29-8980-c1b177608222-catalog-content\") pod \"community-operators-hp4x4\" (UID: \"ed2828e1-80b5-4e29-8980-c1b177608222\") " pod="openshift-marketplace/community-operators-hp4x4"
Jan 27 20:41:09 crc kubenswrapper[4793]: I0127 20:41:09.024049 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brrqd\" (UniqueName: \"kubernetes.io/projected/ed2828e1-80b5-4e29-8980-c1b177608222-kube-api-access-brrqd\") pod \"community-operators-hp4x4\" (UID: \"ed2828e1-80b5-4e29-8980-c1b177608222\") " pod="openshift-marketplace/community-operators-hp4x4"
Jan 27 20:41:09 crc kubenswrapper[4793]: I0127 20:41:09.024124 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed2828e1-80b5-4e29-8980-c1b177608222-utilities\") pod \"community-operators-hp4x4\" (UID: \"ed2828e1-80b5-4e29-8980-c1b177608222\") " pod="openshift-marketplace/community-operators-hp4x4"
Jan 27 20:41:09 crc kubenswrapper[4793]: I0127 20:41:09.024243 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed2828e1-80b5-4e29-8980-c1b177608222-catalog-content\") pod \"community-operators-hp4x4\" (UID: \"ed2828e1-80b5-4e29-8980-c1b177608222\") " pod="openshift-marketplace/community-operators-hp4x4"
Jan 27 20:41:09 crc kubenswrapper[4793]: I0127 20:41:09.057459 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brrqd\" (UniqueName: \"kubernetes.io/projected/ed2828e1-80b5-4e29-8980-c1b177608222-kube-api-access-brrqd\") pod \"community-operators-hp4x4\" (UID: \"ed2828e1-80b5-4e29-8980-c1b177608222\") " pod="openshift-marketplace/community-operators-hp4x4"
containerID="d337ded29f3af35626851304ef3e62e7279c142e12e597ea504a5d828fef772c" exitCode=0 Jan 27 20:41:12 crc kubenswrapper[4793]: I0127 20:41:12.538985 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hp4x4" event={"ID":"ed2828e1-80b5-4e29-8980-c1b177608222","Type":"ContainerDied","Data":"d337ded29f3af35626851304ef3e62e7279c142e12e597ea504a5d828fef772c"} Jan 27 20:41:13 crc kubenswrapper[4793]: I0127 20:41:13.551116 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hp4x4" event={"ID":"ed2828e1-80b5-4e29-8980-c1b177608222","Type":"ContainerStarted","Data":"20d8008873760ca49de23d44f9c723d4bad981dcbe1683eaf6cf4abcb079e8be"} Jan 27 20:41:13 crc kubenswrapper[4793]: I0127 20:41:13.596295 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hp4x4" podStartSLOduration=3.1579506139999998 podStartE2EDuration="5.596272202s" podCreationTimestamp="2026-01-27 20:41:08 +0000 UTC" firstStartedPulling="2026-01-27 20:41:10.517532105 +0000 UTC m=+2295.907785261" lastFinishedPulling="2026-01-27 20:41:12.955853683 +0000 UTC m=+2298.346106849" observedRunningTime="2026-01-27 20:41:13.586020827 +0000 UTC m=+2298.976273983" watchObservedRunningTime="2026-01-27 20:41:13.596272202 +0000 UTC m=+2298.986525358" Jan 27 20:41:19 crc kubenswrapper[4793]: I0127 20:41:19.344699 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hp4x4" Jan 27 20:41:19 crc kubenswrapper[4793]: I0127 20:41:19.346191 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hp4x4" Jan 27 20:41:19 crc kubenswrapper[4793]: I0127 20:41:19.395079 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hp4x4" Jan 27 20:41:19 crc kubenswrapper[4793]: I0127 20:41:19.656002 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hp4x4" Jan 27 20:41:19 crc kubenswrapper[4793]: I0127 20:41:19.716571 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hp4x4"] Jan 27 20:41:21 crc kubenswrapper[4793]: I0127 20:41:21.619313 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hp4x4" podUID="ed2828e1-80b5-4e29-8980-c1b177608222" containerName="registry-server" containerID="cri-o://20d8008873760ca49de23d44f9c723d4bad981dcbe1683eaf6cf4abcb079e8be" gracePeriod=2 Jan 27 20:41:21 crc kubenswrapper[4793]: I0127 20:41:21.803632 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:41:21 crc kubenswrapper[4793]: E0127 20:41:21.803881 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.193283 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hp4x4" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.319306 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed2828e1-80b5-4e29-8980-c1b177608222-utilities\") pod \"ed2828e1-80b5-4e29-8980-c1b177608222\" (UID: \"ed2828e1-80b5-4e29-8980-c1b177608222\") " Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.319435 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed2828e1-80b5-4e29-8980-c1b177608222-catalog-content\") pod \"ed2828e1-80b5-4e29-8980-c1b177608222\" (UID: \"ed2828e1-80b5-4e29-8980-c1b177608222\") " Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.319590 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-brrqd\" (UniqueName: \"kubernetes.io/projected/ed2828e1-80b5-4e29-8980-c1b177608222-kube-api-access-brrqd\") pod \"ed2828e1-80b5-4e29-8980-c1b177608222\" (UID: \"ed2828e1-80b5-4e29-8980-c1b177608222\") " Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.320364 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed2828e1-80b5-4e29-8980-c1b177608222-utilities" (OuterVolumeSpecName: "utilities") pod "ed2828e1-80b5-4e29-8980-c1b177608222" (UID: "ed2828e1-80b5-4e29-8980-c1b177608222"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.326114 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed2828e1-80b5-4e29-8980-c1b177608222-kube-api-access-brrqd" (OuterVolumeSpecName: "kube-api-access-brrqd") pod "ed2828e1-80b5-4e29-8980-c1b177608222" (UID: "ed2828e1-80b5-4e29-8980-c1b177608222"). InnerVolumeSpecName "kube-api-access-brrqd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.424461 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed2828e1-80b5-4e29-8980-c1b177608222-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.424521 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-brrqd\" (UniqueName: \"kubernetes.io/projected/ed2828e1-80b5-4e29-8980-c1b177608222-kube-api-access-brrqd\") on node \"crc\" DevicePath \"\"" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.630351 4793 generic.go:334] "Generic (PLEG): container finished" podID="ed2828e1-80b5-4e29-8980-c1b177608222" containerID="20d8008873760ca49de23d44f9c723d4bad981dcbe1683eaf6cf4abcb079e8be" exitCode=0 Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.630442 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hp4x4" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.630467 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hp4x4" event={"ID":"ed2828e1-80b5-4e29-8980-c1b177608222","Type":"ContainerDied","Data":"20d8008873760ca49de23d44f9c723d4bad981dcbe1683eaf6cf4abcb079e8be"} Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.630520 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hp4x4" event={"ID":"ed2828e1-80b5-4e29-8980-c1b177608222","Type":"ContainerDied","Data":"f71cbb49e42234290408e7db9dd55b646165ea6e99f277876ad2e54c460970ba"} Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.630562 4793 scope.go:117] "RemoveContainer" containerID="20d8008873760ca49de23d44f9c723d4bad981dcbe1683eaf6cf4abcb079e8be" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.653242 4793 scope.go:117] "RemoveContainer" containerID="d337ded29f3af35626851304ef3e62e7279c142e12e597ea504a5d828fef772c" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.685310 4793 scope.go:117] "RemoveContainer" containerID="43cfe791a679266e5c60d7f619b8260cc884a9db4aaeedf275bfdf9518fc9b88" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.723114 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed2828e1-80b5-4e29-8980-c1b177608222-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ed2828e1-80b5-4e29-8980-c1b177608222" (UID: "ed2828e1-80b5-4e29-8980-c1b177608222"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.726085 4793 scope.go:117] "RemoveContainer" containerID="20d8008873760ca49de23d44f9c723d4bad981dcbe1683eaf6cf4abcb079e8be" Jan 27 20:41:22 crc kubenswrapper[4793]: E0127 20:41:22.726851 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20d8008873760ca49de23d44f9c723d4bad981dcbe1683eaf6cf4abcb079e8be\": container with ID starting with 20d8008873760ca49de23d44f9c723d4bad981dcbe1683eaf6cf4abcb079e8be not found: ID does not exist" containerID="20d8008873760ca49de23d44f9c723d4bad981dcbe1683eaf6cf4abcb079e8be" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.726897 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20d8008873760ca49de23d44f9c723d4bad981dcbe1683eaf6cf4abcb079e8be"} err="failed to get container status \"20d8008873760ca49de23d44f9c723d4bad981dcbe1683eaf6cf4abcb079e8be\": rpc error: code = NotFound desc = could not find container \"20d8008873760ca49de23d44f9c723d4bad981dcbe1683eaf6cf4abcb079e8be\": container with ID starting with 20d8008873760ca49de23d44f9c723d4bad981dcbe1683eaf6cf4abcb079e8be not found: ID does not exist" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.726928 4793 scope.go:117] "RemoveContainer" containerID="d337ded29f3af35626851304ef3e62e7279c142e12e597ea504a5d828fef772c" Jan 27 20:41:22 crc kubenswrapper[4793]: E0127 20:41:22.728040 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d337ded29f3af35626851304ef3e62e7279c142e12e597ea504a5d828fef772c\": container with ID starting with d337ded29f3af35626851304ef3e62e7279c142e12e597ea504a5d828fef772c not found: ID does not exist" 
containerID="d337ded29f3af35626851304ef3e62e7279c142e12e597ea504a5d828fef772c" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.728076 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d337ded29f3af35626851304ef3e62e7279c142e12e597ea504a5d828fef772c"} err="failed to get container status \"d337ded29f3af35626851304ef3e62e7279c142e12e597ea504a5d828fef772c\": rpc error: code = NotFound desc = could not find container \"d337ded29f3af35626851304ef3e62e7279c142e12e597ea504a5d828fef772c\": container with ID starting with d337ded29f3af35626851304ef3e62e7279c142e12e597ea504a5d828fef772c not found: ID does not exist" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.728094 4793 scope.go:117] "RemoveContainer" containerID="43cfe791a679266e5c60d7f619b8260cc884a9db4aaeedf275bfdf9518fc9b88" Jan 27 20:41:22 crc kubenswrapper[4793]: E0127 20:41:22.728748 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"43cfe791a679266e5c60d7f619b8260cc884a9db4aaeedf275bfdf9518fc9b88\": container with ID starting with 43cfe791a679266e5c60d7f619b8260cc884a9db4aaeedf275bfdf9518fc9b88 not found: ID does not exist" containerID="43cfe791a679266e5c60d7f619b8260cc884a9db4aaeedf275bfdf9518fc9b88" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.728819 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43cfe791a679266e5c60d7f619b8260cc884a9db4aaeedf275bfdf9518fc9b88"} err="failed to get container status \"43cfe791a679266e5c60d7f619b8260cc884a9db4aaeedf275bfdf9518fc9b88\": rpc error: code = NotFound desc = could not find container \"43cfe791a679266e5c60d7f619b8260cc884a9db4aaeedf275bfdf9518fc9b88\": container with ID starting with 43cfe791a679266e5c60d7f619b8260cc884a9db4aaeedf275bfdf9518fc9b88 not found: ID does not exist" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.729688 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed2828e1-80b5-4e29-8980-c1b177608222-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.984602 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hp4x4"] Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.998970 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hp4x4"] Jan 27 20:41:23 crc kubenswrapper[4793]: I0127 20:41:23.816072 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed2828e1-80b5-4e29-8980-c1b177608222" path="/var/lib/kubelet/pods/ed2828e1-80b5-4e29-8980-c1b177608222/volumes" Jan 27 20:41:33 crc kubenswrapper[4793]: I0127 20:41:33.803290 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:41:33 crc kubenswrapper[4793]: E0127 20:41:33.804086 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:41:46 crc kubenswrapper[4793]: I0127 20:41:46.803702 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 
Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.729688 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed2828e1-80b5-4e29-8980-c1b177608222-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.984602 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hp4x4"]
Jan 27 20:41:22 crc kubenswrapper[4793]: I0127 20:41:22.998970 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hp4x4"]
Jan 27 20:41:23 crc kubenswrapper[4793]: I0127 20:41:23.816072 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed2828e1-80b5-4e29-8980-c1b177608222" path="/var/lib/kubelet/pods/ed2828e1-80b5-4e29-8980-c1b177608222/volumes"
Jan 27 20:41:33 crc kubenswrapper[4793]: I0127 20:41:33.803290 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d"
Jan 27 20:41:33 crc kubenswrapper[4793]: E0127 20:41:33.804086 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:41:46 crc kubenswrapper[4793]: I0127 20:41:46.803702 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d"
Jan 27 20:41:46 crc kubenswrapper[4793]: E0127 20:41:46.805037 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:41:52 crc kubenswrapper[4793]: I0127 20:41:52.753827 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 20:41:52 crc kubenswrapper[4793]: I0127 20:41:52.754436 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 20:41:57 crc kubenswrapper[4793]: I0127 20:41:57.804267 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d"
Jan 27 20:41:57 crc kubenswrapper[4793]: E0127 20:41:57.805175 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:42:11 crc kubenswrapper[4793]: I0127 20:42:11.803946 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d"
Jan 27 20:42:11 crc kubenswrapper[4793]: E0127 20:42:11.806185 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:42:22 crc kubenswrapper[4793]: I0127 20:42:22.753292 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 20:42:22 crc kubenswrapper[4793]: I0127 20:42:22.753819 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 20:42:24 crc kubenswrapper[4793]: I0127 20:42:24.803511 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d"
Jan 27 20:42:24 crc kubenswrapper[4793]: E0127 20:42:24.804051 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:42:38 crc kubenswrapper[4793]: I0127 20:42:38.803641 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d"
Jan 27 20:42:38 crc kubenswrapper[4793]: E0127 20:42:38.804514 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:42:51 crc kubenswrapper[4793]: I0127 20:42:51.804536 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d"
Jan 27 20:42:51 crc kubenswrapper[4793]: E0127 20:42:51.805461 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:42:52 crc kubenswrapper[4793]: I0127 20:42:52.753613 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 20:42:52 crc kubenswrapper[4793]: I0127 20:42:52.753681 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 20:42:52 crc kubenswrapper[4793]: I0127 20:42:52.753795 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn"
Jan 27 20:42:52 crc kubenswrapper[4793]: I0127 20:42:52.754619 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 27 20:42:52 crc kubenswrapper[4793]: I0127 20:42:52.754688 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" gracePeriod=600
Jan 27 20:42:52 crc kubenswrapper[4793]: E0127 20:42:52.877235 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
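Three liveness failures in a row (20:41:52, 20:42:22, 20:42:52) cross the default failureThreshold of 3, so the kubelet kills machine-config-daemon with its 600s grace period and the container joins the restart back-off cycle. The probe itself is a plain HTTP GET; a minimal Go sketch against the endpoint from the log, treating a connection error or a status outside 200-399 as a failure (an illustration of the probe semantics, not kubelet code):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe issues the same kind of GET the kubelet prober does; a connection
// error (like the "connection refused" above) or a bad status is a failure.
func probe(url string) error {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "dial tcp 127.0.0.1:8798: connect: connection refused"
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unhealthy status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probe("http://127.0.0.1:8798/health"); err != nil {
		fmt.Println("Probe failed:", err)
	} else {
		fmt.Println("Probe succeeded")
	}
}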
pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:42:53 crc kubenswrapper[4793]: I0127 20:42:53.961882 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" exitCode=0 Jan 27 20:42:53 crc kubenswrapper[4793]: I0127 20:42:53.962388 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f"} Jan 27 20:42:53 crc kubenswrapper[4793]: I0127 20:42:53.966684 4793 scope.go:117] "RemoveContainer" containerID="edc7c2540f3558d7380d42f09096b31a287517dfb634b17e1816e1dd5a5547a3" Jan 27 20:42:53 crc kubenswrapper[4793]: I0127 20:42:53.968813 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" Jan 27 20:42:53 crc kubenswrapper[4793]: E0127 20:42:53.977024 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:43:02 crc kubenswrapper[4793]: I0127 20:43:02.803792 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:43:03 crc kubenswrapper[4793]: I0127 20:43:03.049620 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298"} Jan 27 20:43:03 crc kubenswrapper[4793]: I0127 20:43:03.244391 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 20:43:06 crc kubenswrapper[4793]: I0127 20:43:06.203656 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298" exitCode=1 Jan 27 20:43:06 crc kubenswrapper[4793]: I0127 20:43:06.203754 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298"} Jan 27 20:43:06 crc kubenswrapper[4793]: I0127 20:43:06.203920 4793 scope.go:117] "RemoveContainer" containerID="2ca82ea0bdbb8f4560ba3caa440dde43a5bf8da490fa8cb7252a9e3fb1c65e0d" Jan 27 20:43:06 crc kubenswrapper[4793]: I0127 20:43:06.204659 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298" Jan 27 20:43:06 crc kubenswrapper[4793]: E0127 20:43:06.204889 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:43:08 crc 
kubenswrapper[4793]: I0127 20:43:08.243163 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:43:08 crc kubenswrapper[4793]: I0127 20:43:08.243451 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:43:08 crc kubenswrapper[4793]: I0127 20:43:08.243465 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:43:08 crc kubenswrapper[4793]: I0127 20:43:08.244170 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298" Jan 27 20:43:08 crc kubenswrapper[4793]: E0127 20:43:08.244479 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:43:08 crc kubenswrapper[4793]: I0127 20:43:08.804344 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" Jan 27 20:43:08 crc kubenswrapper[4793]: E0127 20:43:08.805624 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:43:18 crc kubenswrapper[4793]: I0127 20:43:18.802962 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298" Jan 27 20:43:18 crc kubenswrapper[4793]: E0127 20:43:18.803850 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:43:19 crc kubenswrapper[4793]: I0127 20:43:19.809573 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" Jan 27 20:43:19 crc kubenswrapper[4793]: E0127 20:43:19.810742 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:43:32 crc kubenswrapper[4793]: I0127 20:43:32.803818 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298" Jan 27 20:43:32 crc kubenswrapper[4793]: E0127 20:43:32.804692 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" 
pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:43:33 crc kubenswrapper[4793]: I0127 20:43:33.802928 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" Jan 27 20:43:33 crc kubenswrapper[4793]: E0127 20:43:33.803351 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:43:44 crc kubenswrapper[4793]: I0127 20:43:44.803282 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298" Jan 27 20:43:44 crc kubenswrapper[4793]: E0127 20:43:44.804081 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:43:46 crc kubenswrapper[4793]: I0127 20:43:46.806814 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" Jan 27 20:43:46 crc kubenswrapper[4793]: E0127 20:43:46.807208 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:43:57 crc kubenswrapper[4793]: I0127 20:43:57.803280 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298" Jan 27 20:43:57 crc kubenswrapper[4793]: E0127 20:43:57.804026 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:43:58 crc kubenswrapper[4793]: I0127 20:43:58.803808 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" Jan 27 20:43:58 crc kubenswrapper[4793]: E0127 20:43:58.804386 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:44:10 crc kubenswrapper[4793]: I0127 20:44:10.803587 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" Jan 27 20:44:10 crc kubenswrapper[4793]: E0127 20:44:10.804409 4793 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:44:12 crc kubenswrapper[4793]: I0127 20:44:12.803915 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298" Jan 27 20:44:12 crc kubenswrapper[4793]: E0127 20:44:12.804446 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:44:22 crc kubenswrapper[4793]: I0127 20:44:22.803823 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" Jan 27 20:44:22 crc kubenswrapper[4793]: E0127 20:44:22.805433 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:44:27 crc kubenswrapper[4793]: I0127 20:44:27.803910 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298" Jan 27 20:44:27 crc kubenswrapper[4793]: E0127 20:44:27.806342 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:44:37 crc kubenswrapper[4793]: I0127 20:44:37.804459 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" Jan 27 20:44:37 crc kubenswrapper[4793]: E0127 20:44:37.805322 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:44:40 crc kubenswrapper[4793]: I0127 20:44:40.803943 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298" Jan 27 20:44:40 crc kubenswrapper[4793]: E0127 20:44:40.804645 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:44:50 crc 
Jan 27 20:44:50 crc kubenswrapper[4793]: I0127 20:44:50.803596 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f"
Jan 27 20:44:50 crc kubenswrapper[4793]: E0127 20:44:50.804431 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:44:53 crc kubenswrapper[4793]: I0127 20:44:53.804298 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298"
Jan 27 20:44:53 crc kubenswrapper[4793]: E0127 20:44:53.804999 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.155080 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk"]
Jan 27 20:45:00 crc kubenswrapper[4793]: E0127 20:45:00.156120 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed2828e1-80b5-4e29-8980-c1b177608222" containerName="extract-utilities"
Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.156137 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed2828e1-80b5-4e29-8980-c1b177608222" containerName="extract-utilities"
Jan 27 20:45:00 crc kubenswrapper[4793]: E0127 20:45:00.156156 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed2828e1-80b5-4e29-8980-c1b177608222" containerName="registry-server"
Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.156163 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed2828e1-80b5-4e29-8980-c1b177608222" containerName="registry-server"
Jan 27 20:45:00 crc kubenswrapper[4793]: E0127 20:45:00.156208 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed2828e1-80b5-4e29-8980-c1b177608222" containerName="extract-content"
Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.156218 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed2828e1-80b5-4e29-8980-c1b177608222" containerName="extract-content"
Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.156470 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed2828e1-80b5-4e29-8980-c1b177608222" containerName="registry-server"
Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.157406 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk"
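The job name encodes its scheduled time: the CronJob controller suffixes each Job with the scheduled time expressed in minutes since the Unix epoch, and 29492445 minutes = 1769546700 seconds, which is exactly 2026-01-27 20:45:00 UTC, the timestamp of this ADD. The collect-profiles-29492400-v99t4 job deleted a few entries below was the run from 45 minutes earlier.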
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk" Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.165029 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.166432 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.177921 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec45dbcf-8423-4c64-b5b0-2a84839af548-secret-volume\") pod \"collect-profiles-29492445-gnchk\" (UID: \"ec45dbcf-8423-4c64-b5b0-2a84839af548\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk" Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.178320 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec45dbcf-8423-4c64-b5b0-2a84839af548-config-volume\") pod \"collect-profiles-29492445-gnchk\" (UID: \"ec45dbcf-8423-4c64-b5b0-2a84839af548\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk" Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.178421 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbpm5\" (UniqueName: \"kubernetes.io/projected/ec45dbcf-8423-4c64-b5b0-2a84839af548-kube-api-access-dbpm5\") pod \"collect-profiles-29492445-gnchk\" (UID: \"ec45dbcf-8423-4c64-b5b0-2a84839af548\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk" Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.180211 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk"] Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.282109 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec45dbcf-8423-4c64-b5b0-2a84839af548-config-volume\") pod \"collect-profiles-29492445-gnchk\" (UID: \"ec45dbcf-8423-4c64-b5b0-2a84839af548\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk" Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.282170 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbpm5\" (UniqueName: \"kubernetes.io/projected/ec45dbcf-8423-4c64-b5b0-2a84839af548-kube-api-access-dbpm5\") pod \"collect-profiles-29492445-gnchk\" (UID: \"ec45dbcf-8423-4c64-b5b0-2a84839af548\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk" Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.282285 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec45dbcf-8423-4c64-b5b0-2a84839af548-secret-volume\") pod \"collect-profiles-29492445-gnchk\" (UID: \"ec45dbcf-8423-4c64-b5b0-2a84839af548\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk" Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.284860 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec45dbcf-8423-4c64-b5b0-2a84839af548-config-volume\") pod 
\"collect-profiles-29492445-gnchk\" (UID: \"ec45dbcf-8423-4c64-b5b0-2a84839af548\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk" Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.291416 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec45dbcf-8423-4c64-b5b0-2a84839af548-secret-volume\") pod \"collect-profiles-29492445-gnchk\" (UID: \"ec45dbcf-8423-4c64-b5b0-2a84839af548\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk" Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.303265 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbpm5\" (UniqueName: \"kubernetes.io/projected/ec45dbcf-8423-4c64-b5b0-2a84839af548-kube-api-access-dbpm5\") pod \"collect-profiles-29492445-gnchk\" (UID: \"ec45dbcf-8423-4c64-b5b0-2a84839af548\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk" Jan 27 20:45:00 crc kubenswrapper[4793]: I0127 20:45:00.528010 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk" Jan 27 20:45:01 crc kubenswrapper[4793]: I0127 20:45:01.000592 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk"] Jan 27 20:45:01 crc kubenswrapper[4793]: I0127 20:45:01.462770 4793 generic.go:334] "Generic (PLEG): container finished" podID="ec45dbcf-8423-4c64-b5b0-2a84839af548" containerID="a8c0a5dda825bab7f2525db5e510a8f6dabaddf3aec1ff224db88236a63fdf0c" exitCode=0 Jan 27 20:45:01 crc kubenswrapper[4793]: I0127 20:45:01.462834 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk" event={"ID":"ec45dbcf-8423-4c64-b5b0-2a84839af548","Type":"ContainerDied","Data":"a8c0a5dda825bab7f2525db5e510a8f6dabaddf3aec1ff224db88236a63fdf0c"} Jan 27 20:45:01 crc kubenswrapper[4793]: I0127 20:45:01.462881 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk" event={"ID":"ec45dbcf-8423-4c64-b5b0-2a84839af548","Type":"ContainerStarted","Data":"40bbd7f8aef0d196a24159cba5038c4dc94b1451dee8bc4f7bafad16a6029897"} Jan 27 20:45:02 crc kubenswrapper[4793]: I0127 20:45:02.888835 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk" Jan 27 20:45:03 crc kubenswrapper[4793]: I0127 20:45:03.028876 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbpm5\" (UniqueName: \"kubernetes.io/projected/ec45dbcf-8423-4c64-b5b0-2a84839af548-kube-api-access-dbpm5\") pod \"ec45dbcf-8423-4c64-b5b0-2a84839af548\" (UID: \"ec45dbcf-8423-4c64-b5b0-2a84839af548\") " Jan 27 20:45:03 crc kubenswrapper[4793]: I0127 20:45:03.028932 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec45dbcf-8423-4c64-b5b0-2a84839af548-secret-volume\") pod \"ec45dbcf-8423-4c64-b5b0-2a84839af548\" (UID: \"ec45dbcf-8423-4c64-b5b0-2a84839af548\") " Jan 27 20:45:03 crc kubenswrapper[4793]: I0127 20:45:03.029046 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec45dbcf-8423-4c64-b5b0-2a84839af548-config-volume\") pod \"ec45dbcf-8423-4c64-b5b0-2a84839af548\" (UID: \"ec45dbcf-8423-4c64-b5b0-2a84839af548\") " Jan 27 20:45:03 crc kubenswrapper[4793]: I0127 20:45:03.031621 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec45dbcf-8423-4c64-b5b0-2a84839af548-config-volume" (OuterVolumeSpecName: "config-volume") pod "ec45dbcf-8423-4c64-b5b0-2a84839af548" (UID: "ec45dbcf-8423-4c64-b5b0-2a84839af548"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:45:03 crc kubenswrapper[4793]: I0127 20:45:03.034976 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec45dbcf-8423-4c64-b5b0-2a84839af548-kube-api-access-dbpm5" (OuterVolumeSpecName: "kube-api-access-dbpm5") pod "ec45dbcf-8423-4c64-b5b0-2a84839af548" (UID: "ec45dbcf-8423-4c64-b5b0-2a84839af548"). InnerVolumeSpecName "kube-api-access-dbpm5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:45:03 crc kubenswrapper[4793]: I0127 20:45:03.035110 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec45dbcf-8423-4c64-b5b0-2a84839af548-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ec45dbcf-8423-4c64-b5b0-2a84839af548" (UID: "ec45dbcf-8423-4c64-b5b0-2a84839af548"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:45:03 crc kubenswrapper[4793]: I0127 20:45:03.132065 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbpm5\" (UniqueName: \"kubernetes.io/projected/ec45dbcf-8423-4c64-b5b0-2a84839af548-kube-api-access-dbpm5\") on node \"crc\" DevicePath \"\"" Jan 27 20:45:03 crc kubenswrapper[4793]: I0127 20:45:03.132117 4793 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec45dbcf-8423-4c64-b5b0-2a84839af548-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 20:45:03 crc kubenswrapper[4793]: I0127 20:45:03.132130 4793 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec45dbcf-8423-4c64-b5b0-2a84839af548-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 20:45:03 crc kubenswrapper[4793]: I0127 20:45:03.484906 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk" event={"ID":"ec45dbcf-8423-4c64-b5b0-2a84839af548","Type":"ContainerDied","Data":"40bbd7f8aef0d196a24159cba5038c4dc94b1451dee8bc4f7bafad16a6029897"} Jan 27 20:45:03 crc kubenswrapper[4793]: I0127 20:45:03.484948 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40bbd7f8aef0d196a24159cba5038c4dc94b1451dee8bc4f7bafad16a6029897" Jan 27 20:45:03 crc kubenswrapper[4793]: I0127 20:45:03.485004 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk" Jan 27 20:45:03 crc kubenswrapper[4793]: I0127 20:45:03.804085 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" Jan 27 20:45:03 crc kubenswrapper[4793]: E0127 20:45:03.804736 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:45:04 crc kubenswrapper[4793]: I0127 20:45:04.015761 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4"] Jan 27 20:45:04 crc kubenswrapper[4793]: I0127 20:45:04.026303 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492400-v99t4"] Jan 27 20:45:04 crc kubenswrapper[4793]: I0127 20:45:04.827936 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298" Jan 27 20:45:04 crc kubenswrapper[4793]: E0127 20:45:04.828764 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:45:05 crc kubenswrapper[4793]: I0127 20:45:05.815620 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11588ab1-689a-4227-a887-a57b945807a2" path="/var/lib/kubelet/pods/11588ab1-689a-4227-a887-a57b945807a2/volumes" Jan 27 
20:45:14 crc kubenswrapper[4793]: I0127 20:45:14.804093 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" Jan 27 20:45:14 crc kubenswrapper[4793]: E0127 20:45:14.805077 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:45:16 crc kubenswrapper[4793]: I0127 20:45:16.803069 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298" Jan 27 20:45:16 crc kubenswrapper[4793]: E0127 20:45:16.803475 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:45:27 crc kubenswrapper[4793]: I0127 20:45:27.971394 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-sb87r"] Jan 27 20:45:27 crc kubenswrapper[4793]: E0127 20:45:27.972717 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec45dbcf-8423-4c64-b5b0-2a84839af548" containerName="collect-profiles" Jan 27 20:45:27 crc kubenswrapper[4793]: I0127 20:45:27.972736 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec45dbcf-8423-4c64-b5b0-2a84839af548" containerName="collect-profiles" Jan 27 20:45:27 crc kubenswrapper[4793]: I0127 20:45:27.973050 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec45dbcf-8423-4c64-b5b0-2a84839af548" containerName="collect-profiles" Jan 27 20:45:27 crc kubenswrapper[4793]: I0127 20:45:27.986187 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:28 crc kubenswrapper[4793]: I0127 20:45:28.005633 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sb87r"] Jan 27 20:45:28 crc kubenswrapper[4793]: I0127 20:45:28.130926 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b036bdd2-8e9c-4a17-8162-1e5a94520fba-utilities\") pod \"redhat-marketplace-sb87r\" (UID: \"b036bdd2-8e9c-4a17-8162-1e5a94520fba\") " pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:28 crc kubenswrapper[4793]: I0127 20:45:28.130984 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q65dp\" (UniqueName: \"kubernetes.io/projected/b036bdd2-8e9c-4a17-8162-1e5a94520fba-kube-api-access-q65dp\") pod \"redhat-marketplace-sb87r\" (UID: \"b036bdd2-8e9c-4a17-8162-1e5a94520fba\") " pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:28 crc kubenswrapper[4793]: I0127 20:45:28.131015 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b036bdd2-8e9c-4a17-8162-1e5a94520fba-catalog-content\") pod \"redhat-marketplace-sb87r\" (UID: \"b036bdd2-8e9c-4a17-8162-1e5a94520fba\") " pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:28 crc kubenswrapper[4793]: I0127 20:45:28.233498 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b036bdd2-8e9c-4a17-8162-1e5a94520fba-utilities\") pod \"redhat-marketplace-sb87r\" (UID: \"b036bdd2-8e9c-4a17-8162-1e5a94520fba\") " pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:28 crc kubenswrapper[4793]: I0127 20:45:28.233563 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b036bdd2-8e9c-4a17-8162-1e5a94520fba-catalog-content\") pod \"redhat-marketplace-sb87r\" (UID: \"b036bdd2-8e9c-4a17-8162-1e5a94520fba\") " pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:28 crc kubenswrapper[4793]: I0127 20:45:28.233602 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q65dp\" (UniqueName: \"kubernetes.io/projected/b036bdd2-8e9c-4a17-8162-1e5a94520fba-kube-api-access-q65dp\") pod \"redhat-marketplace-sb87r\" (UID: \"b036bdd2-8e9c-4a17-8162-1e5a94520fba\") " pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:28 crc kubenswrapper[4793]: I0127 20:45:28.234014 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b036bdd2-8e9c-4a17-8162-1e5a94520fba-utilities\") pod \"redhat-marketplace-sb87r\" (UID: \"b036bdd2-8e9c-4a17-8162-1e5a94520fba\") " pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:28 crc kubenswrapper[4793]: I0127 20:45:28.234190 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b036bdd2-8e9c-4a17-8162-1e5a94520fba-catalog-content\") pod \"redhat-marketplace-sb87r\" (UID: \"b036bdd2-8e9c-4a17-8162-1e5a94520fba\") " pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:28 crc kubenswrapper[4793]: I0127 20:45:28.253503 4793 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-q65dp\" (UniqueName: \"kubernetes.io/projected/b036bdd2-8e9c-4a17-8162-1e5a94520fba-kube-api-access-q65dp\") pod \"redhat-marketplace-sb87r\" (UID: \"b036bdd2-8e9c-4a17-8162-1e5a94520fba\") " pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:28 crc kubenswrapper[4793]: I0127 20:45:28.313285 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:29 crc kubenswrapper[4793]: I0127 20:45:29.027866 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sb87r"] Jan 27 20:45:29 crc kubenswrapper[4793]: I0127 20:45:29.803734 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" Jan 27 20:45:29 crc kubenswrapper[4793]: E0127 20:45:29.803994 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:45:29 crc kubenswrapper[4793]: I0127 20:45:29.901071 4793 generic.go:334] "Generic (PLEG): container finished" podID="b036bdd2-8e9c-4a17-8162-1e5a94520fba" containerID="f015f85cf954aced09124c47e4b01675b208540754578eefe8da23f89d048228" exitCode=0 Jan 27 20:45:29 crc kubenswrapper[4793]: I0127 20:45:29.901133 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sb87r" event={"ID":"b036bdd2-8e9c-4a17-8162-1e5a94520fba","Type":"ContainerDied","Data":"f015f85cf954aced09124c47e4b01675b208540754578eefe8da23f89d048228"} Jan 27 20:45:29 crc kubenswrapper[4793]: I0127 20:45:29.901186 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sb87r" event={"ID":"b036bdd2-8e9c-4a17-8162-1e5a94520fba","Type":"ContainerStarted","Data":"a77b8b4f19312932d95ae1e4eef7d76b083e5561586318ce36d0112c8da61e7c"} Jan 27 20:45:30 crc kubenswrapper[4793]: I0127 20:45:30.804116 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298" Jan 27 20:45:30 crc kubenswrapper[4793]: E0127 20:45:30.804771 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:45:30 crc kubenswrapper[4793]: I0127 20:45:30.923203 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sb87r" event={"ID":"b036bdd2-8e9c-4a17-8162-1e5a94520fba","Type":"ContainerStarted","Data":"a3a757e232f4b74715187098d66203479d1a7d2eeceb36326e4b5baa4820e097"} Jan 27 20:45:31 crc kubenswrapper[4793]: I0127 20:45:31.933362 4793 generic.go:334] "Generic (PLEG): container finished" podID="b036bdd2-8e9c-4a17-8162-1e5a94520fba" containerID="a3a757e232f4b74715187098d66203479d1a7d2eeceb36326e4b5baa4820e097" exitCode=0 Jan 27 20:45:31 crc kubenswrapper[4793]: I0127 20:45:31.933408 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-sb87r" event={"ID":"b036bdd2-8e9c-4a17-8162-1e5a94520fba","Type":"ContainerDied","Data":"a3a757e232f4b74715187098d66203479d1a7d2eeceb36326e4b5baa4820e097"} Jan 27 20:45:32 crc kubenswrapper[4793]: I0127 20:45:32.576641 4793 scope.go:117] "RemoveContainer" containerID="59a65e3215a81d98738e1626c562a1b04b0e491558cf2fe85839854ff5c573b7" Jan 27 20:45:32 crc kubenswrapper[4793]: I0127 20:45:32.943506 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sb87r" event={"ID":"b036bdd2-8e9c-4a17-8162-1e5a94520fba","Type":"ContainerStarted","Data":"b160ddf6be5cda6685b055f58af41e5284326a4fdbabbb17b7b2797679ce587e"} Jan 27 20:45:32 crc kubenswrapper[4793]: I0127 20:45:32.967878 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-sb87r" podStartSLOduration=3.5459803069999998 podStartE2EDuration="5.96785816s" podCreationTimestamp="2026-01-27 20:45:27 +0000 UTC" firstStartedPulling="2026-01-27 20:45:29.903091071 +0000 UTC m=+2555.293344227" lastFinishedPulling="2026-01-27 20:45:32.324968924 +0000 UTC m=+2557.715222080" observedRunningTime="2026-01-27 20:45:32.963441319 +0000 UTC m=+2558.353694495" watchObservedRunningTime="2026-01-27 20:45:32.96785816 +0000 UTC m=+2558.358111316" Jan 27 20:45:38 crc kubenswrapper[4793]: I0127 20:45:38.315143 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:38 crc kubenswrapper[4793]: I0127 20:45:38.316261 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:38 crc kubenswrapper[4793]: I0127 20:45:38.364213 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:39 crc kubenswrapper[4793]: I0127 20:45:39.194332 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:39 crc kubenswrapper[4793]: I0127 20:45:39.262925 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sb87r"] Jan 27 20:45:41 crc kubenswrapper[4793]: I0127 20:45:41.015747 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ksktt"] Jan 27 20:45:41 crc kubenswrapper[4793]: I0127 20:45:41.250155 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:41 crc kubenswrapper[4793]: I0127 20:45:41.264252 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ksktt"] Jan 27 20:45:41 crc kubenswrapper[4793]: I0127 20:45:41.290611 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-sb87r" podUID="b036bdd2-8e9c-4a17-8162-1e5a94520fba" containerName="registry-server" containerID="cri-o://b160ddf6be5cda6685b055f58af41e5284326a4fdbabbb17b7b2797679ce587e" gracePeriod=2 Jan 27 20:45:41 crc kubenswrapper[4793]: I0127 20:45:41.352451 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d3b109d-c597-40af-a65a-2554626509fc-catalog-content\") pod \"certified-operators-ksktt\" (UID: \"9d3b109d-c597-40af-a65a-2554626509fc\") " pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:41 crc kubenswrapper[4793]: I0127 20:45:41.352582 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l26zb\" (UniqueName: \"kubernetes.io/projected/9d3b109d-c597-40af-a65a-2554626509fc-kube-api-access-l26zb\") pod \"certified-operators-ksktt\" (UID: \"9d3b109d-c597-40af-a65a-2554626509fc\") " pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:41 crc kubenswrapper[4793]: I0127 20:45:41.352672 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d3b109d-c597-40af-a65a-2554626509fc-utilities\") pod \"certified-operators-ksktt\" (UID: \"9d3b109d-c597-40af-a65a-2554626509fc\") " pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:41 crc kubenswrapper[4793]: I0127 20:45:41.454990 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d3b109d-c597-40af-a65a-2554626509fc-utilities\") pod \"certified-operators-ksktt\" (UID: \"9d3b109d-c597-40af-a65a-2554626509fc\") " pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:41 crc kubenswrapper[4793]: I0127 20:45:41.455135 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d3b109d-c597-40af-a65a-2554626509fc-catalog-content\") pod \"certified-operators-ksktt\" (UID: \"9d3b109d-c597-40af-a65a-2554626509fc\") " pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:41 crc kubenswrapper[4793]: I0127 20:45:41.455233 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l26zb\" (UniqueName: \"kubernetes.io/projected/9d3b109d-c597-40af-a65a-2554626509fc-kube-api-access-l26zb\") pod \"certified-operators-ksktt\" (UID: \"9d3b109d-c597-40af-a65a-2554626509fc\") " pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:41 crc kubenswrapper[4793]: I0127 20:45:41.456162 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d3b109d-c597-40af-a65a-2554626509fc-utilities\") pod \"certified-operators-ksktt\" (UID: \"9d3b109d-c597-40af-a65a-2554626509fc\") " pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:41 crc kubenswrapper[4793]: I0127 20:45:41.456429 4793 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d3b109d-c597-40af-a65a-2554626509fc-catalog-content\") pod \"certified-operators-ksktt\" (UID: \"9d3b109d-c597-40af-a65a-2554626509fc\") " pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:41 crc kubenswrapper[4793]: I0127 20:45:41.476780 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l26zb\" (UniqueName: \"kubernetes.io/projected/9d3b109d-c597-40af-a65a-2554626509fc-kube-api-access-l26zb\") pod \"certified-operators-ksktt\" (UID: \"9d3b109d-c597-40af-a65a-2554626509fc\") " pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:41 crc kubenswrapper[4793]: I0127 20:45:41.594412 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:42 crc kubenswrapper[4793]: I0127 20:45:42.125975 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ksktt"] Jan 27 20:45:42 crc kubenswrapper[4793]: I0127 20:45:42.316620 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksktt" event={"ID":"9d3b109d-c597-40af-a65a-2554626509fc","Type":"ContainerStarted","Data":"9d115363fb091fff16cb3bfc1b438ce3604fd871c9d9cf6a8ba68a5ee60fc8d9"} Jan 27 20:45:42 crc kubenswrapper[4793]: I0127 20:45:42.333312 4793 generic.go:334] "Generic (PLEG): container finished" podID="b036bdd2-8e9c-4a17-8162-1e5a94520fba" containerID="b160ddf6be5cda6685b055f58af41e5284326a4fdbabbb17b7b2797679ce587e" exitCode=0 Jan 27 20:45:42 crc kubenswrapper[4793]: I0127 20:45:42.333374 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sb87r" event={"ID":"b036bdd2-8e9c-4a17-8162-1e5a94520fba","Type":"ContainerDied","Data":"b160ddf6be5cda6685b055f58af41e5284326a4fdbabbb17b7b2797679ce587e"} Jan 27 20:45:42 crc kubenswrapper[4793]: I0127 20:45:42.405423 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:42 crc kubenswrapper[4793]: I0127 20:45:42.478423 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b036bdd2-8e9c-4a17-8162-1e5a94520fba-utilities\") pod \"b036bdd2-8e9c-4a17-8162-1e5a94520fba\" (UID: \"b036bdd2-8e9c-4a17-8162-1e5a94520fba\") " Jan 27 20:45:42 crc kubenswrapper[4793]: I0127 20:45:42.480203 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b036bdd2-8e9c-4a17-8162-1e5a94520fba-utilities" (OuterVolumeSpecName: "utilities") pod "b036bdd2-8e9c-4a17-8162-1e5a94520fba" (UID: "b036bdd2-8e9c-4a17-8162-1e5a94520fba"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:45:42 crc kubenswrapper[4793]: I0127 20:45:42.480424 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b036bdd2-8e9c-4a17-8162-1e5a94520fba-catalog-content\") pod \"b036bdd2-8e9c-4a17-8162-1e5a94520fba\" (UID: \"b036bdd2-8e9c-4a17-8162-1e5a94520fba\") " Jan 27 20:45:42 crc kubenswrapper[4793]: I0127 20:45:42.487614 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q65dp\" (UniqueName: \"kubernetes.io/projected/b036bdd2-8e9c-4a17-8162-1e5a94520fba-kube-api-access-q65dp\") pod \"b036bdd2-8e9c-4a17-8162-1e5a94520fba\" (UID: \"b036bdd2-8e9c-4a17-8162-1e5a94520fba\") " Jan 27 20:45:42 crc kubenswrapper[4793]: I0127 20:45:42.491600 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b036bdd2-8e9c-4a17-8162-1e5a94520fba-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:45:42 crc kubenswrapper[4793]: I0127 20:45:42.498793 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b036bdd2-8e9c-4a17-8162-1e5a94520fba-kube-api-access-q65dp" (OuterVolumeSpecName: "kube-api-access-q65dp") pod "b036bdd2-8e9c-4a17-8162-1e5a94520fba" (UID: "b036bdd2-8e9c-4a17-8162-1e5a94520fba"). InnerVolumeSpecName "kube-api-access-q65dp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:45:42 crc kubenswrapper[4793]: I0127 20:45:42.505284 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b036bdd2-8e9c-4a17-8162-1e5a94520fba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b036bdd2-8e9c-4a17-8162-1e5a94520fba" (UID: "b036bdd2-8e9c-4a17-8162-1e5a94520fba"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:45:42 crc kubenswrapper[4793]: I0127 20:45:42.594271 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b036bdd2-8e9c-4a17-8162-1e5a94520fba-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:45:42 crc kubenswrapper[4793]: I0127 20:45:42.594521 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q65dp\" (UniqueName: \"kubernetes.io/projected/b036bdd2-8e9c-4a17-8162-1e5a94520fba-kube-api-access-q65dp\") on node \"crc\" DevicePath \"\"" Jan 27 20:45:43 crc kubenswrapper[4793]: I0127 20:45:43.344107 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sb87r" event={"ID":"b036bdd2-8e9c-4a17-8162-1e5a94520fba","Type":"ContainerDied","Data":"a77b8b4f19312932d95ae1e4eef7d76b083e5561586318ce36d0112c8da61e7c"} Jan 27 20:45:43 crc kubenswrapper[4793]: I0127 20:45:43.344165 4793 scope.go:117] "RemoveContainer" containerID="b160ddf6be5cda6685b055f58af41e5284326a4fdbabbb17b7b2797679ce587e" Jan 27 20:45:43 crc kubenswrapper[4793]: I0127 20:45:43.344159 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sb87r" Jan 27 20:45:43 crc kubenswrapper[4793]: I0127 20:45:43.347311 4793 generic.go:334] "Generic (PLEG): container finished" podID="9d3b109d-c597-40af-a65a-2554626509fc" containerID="4954b6ef633fd69ac0f96ccd3ce6ca452426137e7ff91806ebe228fbd68e54cf" exitCode=0 Jan 27 20:45:43 crc kubenswrapper[4793]: I0127 20:45:43.347354 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksktt" event={"ID":"9d3b109d-c597-40af-a65a-2554626509fc","Type":"ContainerDied","Data":"4954b6ef633fd69ac0f96ccd3ce6ca452426137e7ff91806ebe228fbd68e54cf"} Jan 27 20:45:43 crc kubenswrapper[4793]: I0127 20:45:43.377786 4793 scope.go:117] "RemoveContainer" containerID="a3a757e232f4b74715187098d66203479d1a7d2eeceb36326e4b5baa4820e097" Jan 27 20:45:43 crc kubenswrapper[4793]: I0127 20:45:43.506176 4793 scope.go:117] "RemoveContainer" containerID="f015f85cf954aced09124c47e4b01675b208540754578eefe8da23f89d048228" Jan 27 20:45:43 crc kubenswrapper[4793]: I0127 20:45:43.513173 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sb87r"] Jan 27 20:45:43 crc kubenswrapper[4793]: I0127 20:45:43.522444 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-sb87r"] Jan 27 20:45:43 crc kubenswrapper[4793]: I0127 20:45:43.803832 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298" Jan 27 20:45:43 crc kubenswrapper[4793]: I0127 20:45:43.804092 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" Jan 27 20:45:43 crc kubenswrapper[4793]: E0127 20:45:43.804238 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:45:43 crc kubenswrapper[4793]: E0127 20:45:43.804713 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:45:43 crc kubenswrapper[4793]: I0127 20:45:43.824965 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b036bdd2-8e9c-4a17-8162-1e5a94520fba" path="/var/lib/kubelet/pods/b036bdd2-8e9c-4a17-8162-1e5a94520fba/volumes" Jan 27 20:45:44 crc kubenswrapper[4793]: I0127 20:45:44.361821 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksktt" event={"ID":"9d3b109d-c597-40af-a65a-2554626509fc","Type":"ContainerStarted","Data":"e76c423eb0edc98dece1c61d930121cb2268fb1b68507b7a61cf831a83f0e48d"} Jan 27 20:45:45 crc kubenswrapper[4793]: I0127 20:45:45.371511 4793 generic.go:334] "Generic (PLEG): container finished" podID="9d3b109d-c597-40af-a65a-2554626509fc" containerID="e76c423eb0edc98dece1c61d930121cb2268fb1b68507b7a61cf831a83f0e48d" exitCode=0 Jan 27 20:45:45 crc kubenswrapper[4793]: I0127 20:45:45.371592 4793 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/certified-operators-ksktt" event={"ID":"9d3b109d-c597-40af-a65a-2554626509fc","Type":"ContainerDied","Data":"e76c423eb0edc98dece1c61d930121cb2268fb1b68507b7a61cf831a83f0e48d"} Jan 27 20:45:45 crc kubenswrapper[4793]: I0127 20:45:45.835568 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-sdx29"] Jan 27 20:45:45 crc kubenswrapper[4793]: E0127 20:45:45.836635 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b036bdd2-8e9c-4a17-8162-1e5a94520fba" containerName="extract-content" Jan 27 20:45:45 crc kubenswrapper[4793]: I0127 20:45:45.836656 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b036bdd2-8e9c-4a17-8162-1e5a94520fba" containerName="extract-content" Jan 27 20:45:45 crc kubenswrapper[4793]: E0127 20:45:45.836674 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b036bdd2-8e9c-4a17-8162-1e5a94520fba" containerName="registry-server" Jan 27 20:45:45 crc kubenswrapper[4793]: I0127 20:45:45.836680 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b036bdd2-8e9c-4a17-8162-1e5a94520fba" containerName="registry-server" Jan 27 20:45:45 crc kubenswrapper[4793]: E0127 20:45:45.836704 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b036bdd2-8e9c-4a17-8162-1e5a94520fba" containerName="extract-utilities" Jan 27 20:45:45 crc kubenswrapper[4793]: I0127 20:45:45.836712 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b036bdd2-8e9c-4a17-8162-1e5a94520fba" containerName="extract-utilities" Jan 27 20:45:45 crc kubenswrapper[4793]: I0127 20:45:45.836998 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="b036bdd2-8e9c-4a17-8162-1e5a94520fba" containerName="registry-server" Jan 27 20:45:45 crc kubenswrapper[4793]: I0127 20:45:45.842697 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-sdx29" Jan 27 20:45:45 crc kubenswrapper[4793]: I0127 20:45:45.854781 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sdx29"] Jan 27 20:45:45 crc kubenswrapper[4793]: I0127 20:45:45.973995 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-utilities\") pod \"redhat-operators-sdx29\" (UID: \"2a39b9f6-af38-406f-8d7a-ed03d3ca476e\") " pod="openshift-marketplace/redhat-operators-sdx29" Jan 27 20:45:45 crc kubenswrapper[4793]: I0127 20:45:45.974346 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-catalog-content\") pod \"redhat-operators-sdx29\" (UID: \"2a39b9f6-af38-406f-8d7a-ed03d3ca476e\") " pod="openshift-marketplace/redhat-operators-sdx29" Jan 27 20:45:45 crc kubenswrapper[4793]: I0127 20:45:45.974567 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mkd5\" (UniqueName: \"kubernetes.io/projected/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-kube-api-access-6mkd5\") pod \"redhat-operators-sdx29\" (UID: \"2a39b9f6-af38-406f-8d7a-ed03d3ca476e\") " pod="openshift-marketplace/redhat-operators-sdx29" Jan 27 20:45:46 crc kubenswrapper[4793]: I0127 20:45:46.076972 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-catalog-content\") pod \"redhat-operators-sdx29\" (UID: \"2a39b9f6-af38-406f-8d7a-ed03d3ca476e\") " pod="openshift-marketplace/redhat-operators-sdx29" Jan 27 20:45:46 crc kubenswrapper[4793]: I0127 20:45:46.077404 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mkd5\" (UniqueName: \"kubernetes.io/projected/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-kube-api-access-6mkd5\") pod \"redhat-operators-sdx29\" (UID: \"2a39b9f6-af38-406f-8d7a-ed03d3ca476e\") " pod="openshift-marketplace/redhat-operators-sdx29" Jan 27 20:45:46 crc kubenswrapper[4793]: I0127 20:45:46.077895 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-utilities\") pod \"redhat-operators-sdx29\" (UID: \"2a39b9f6-af38-406f-8d7a-ed03d3ca476e\") " pod="openshift-marketplace/redhat-operators-sdx29" Jan 27 20:45:46 crc kubenswrapper[4793]: I0127 20:45:46.078045 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-catalog-content\") pod \"redhat-operators-sdx29\" (UID: \"2a39b9f6-af38-406f-8d7a-ed03d3ca476e\") " pod="openshift-marketplace/redhat-operators-sdx29" Jan 27 20:45:46 crc kubenswrapper[4793]: I0127 20:45:46.078282 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-utilities\") pod \"redhat-operators-sdx29\" (UID: \"2a39b9f6-af38-406f-8d7a-ed03d3ca476e\") " pod="openshift-marketplace/redhat-operators-sdx29" Jan 27 20:45:46 crc kubenswrapper[4793]: I0127 20:45:46.098351 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6mkd5\" (UniqueName: \"kubernetes.io/projected/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-kube-api-access-6mkd5\") pod \"redhat-operators-sdx29\" (UID: \"2a39b9f6-af38-406f-8d7a-ed03d3ca476e\") " pod="openshift-marketplace/redhat-operators-sdx29" Jan 27 20:45:46 crc kubenswrapper[4793]: I0127 20:45:46.185499 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sdx29" Jan 27 20:45:46 crc kubenswrapper[4793]: I0127 20:45:46.468414 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksktt" event={"ID":"9d3b109d-c597-40af-a65a-2554626509fc","Type":"ContainerStarted","Data":"a9205f989a7e1cd8c6284f9557475c40556b140b9b32d22e642c18e735477fd3"} Jan 27 20:45:46 crc kubenswrapper[4793]: I0127 20:45:46.505535 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ksktt" podStartSLOduration=4.079425904 podStartE2EDuration="6.505504801s" podCreationTimestamp="2026-01-27 20:45:40 +0000 UTC" firstStartedPulling="2026-01-27 20:45:43.349816285 +0000 UTC m=+2568.740069461" lastFinishedPulling="2026-01-27 20:45:45.775895172 +0000 UTC m=+2571.166148358" observedRunningTime="2026-01-27 20:45:46.494536191 +0000 UTC m=+2571.884789347" watchObservedRunningTime="2026-01-27 20:45:46.505504801 +0000 UTC m=+2571.895757957" Jan 27 20:45:46 crc kubenswrapper[4793]: I0127 20:45:46.833069 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sdx29"] Jan 27 20:45:46 crc kubenswrapper[4793]: W0127 20:45:46.858114 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2a39b9f6_af38_406f_8d7a_ed03d3ca476e.slice/crio-be9d08e3181d99c32c49976f4d0e7a0fd3df18c527e31df546fc263a90277760 WatchSource:0}: Error finding container be9d08e3181d99c32c49976f4d0e7a0fd3df18c527e31df546fc263a90277760: Status 404 returned error can't find the container with id be9d08e3181d99c32c49976f4d0e7a0fd3df18c527e31df546fc263a90277760 Jan 27 20:45:47 crc kubenswrapper[4793]: I0127 20:45:47.479685 4793 generic.go:334] "Generic (PLEG): container finished" podID="2a39b9f6-af38-406f-8d7a-ed03d3ca476e" containerID="dbcdb9f6ebb0ffca99513f37fc7401d4b6c5061fca9cfd1c2a70312915032ffe" exitCode=0 Jan 27 20:45:47 crc kubenswrapper[4793]: I0127 20:45:47.479791 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdx29" event={"ID":"2a39b9f6-af38-406f-8d7a-ed03d3ca476e","Type":"ContainerDied","Data":"dbcdb9f6ebb0ffca99513f37fc7401d4b6c5061fca9cfd1c2a70312915032ffe"} Jan 27 20:45:47 crc kubenswrapper[4793]: I0127 20:45:47.480043 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdx29" event={"ID":"2a39b9f6-af38-406f-8d7a-ed03d3ca476e","Type":"ContainerStarted","Data":"be9d08e3181d99c32c49976f4d0e7a0fd3df18c527e31df546fc263a90277760"} Jan 27 20:45:48 crc kubenswrapper[4793]: I0127 20:45:48.493983 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdx29" event={"ID":"2a39b9f6-af38-406f-8d7a-ed03d3ca476e","Type":"ContainerStarted","Data":"df2d69863dd9604a1dceab64ae43def30118b6565a3024294479a16c2e7e8d53"} Jan 27 20:45:51 crc kubenswrapper[4793]: I0127 20:45:51.594714 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:51 crc 
kubenswrapper[4793]: I0127 20:45:51.595007 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:51 crc kubenswrapper[4793]: I0127 20:45:51.644508 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:52 crc kubenswrapper[4793]: I0127 20:45:52.188402 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:53 crc kubenswrapper[4793]: I0127 20:45:53.153869 4793 generic.go:334] "Generic (PLEG): container finished" podID="2a39b9f6-af38-406f-8d7a-ed03d3ca476e" containerID="df2d69863dd9604a1dceab64ae43def30118b6565a3024294479a16c2e7e8d53" exitCode=0 Jan 27 20:45:53 crc kubenswrapper[4793]: I0127 20:45:53.153944 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdx29" event={"ID":"2a39b9f6-af38-406f-8d7a-ed03d3ca476e","Type":"ContainerDied","Data":"df2d69863dd9604a1dceab64ae43def30118b6565a3024294479a16c2e7e8d53"} Jan 27 20:45:53 crc kubenswrapper[4793]: I0127 20:45:53.609390 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ksktt"] Jan 27 20:45:54 crc kubenswrapper[4793]: I0127 20:45:54.164006 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ksktt" podUID="9d3b109d-c597-40af-a65a-2554626509fc" containerName="registry-server" containerID="cri-o://a9205f989a7e1cd8c6284f9557475c40556b140b9b32d22e642c18e735477fd3" gracePeriod=2 Jan 27 20:45:54 crc kubenswrapper[4793]: I0127 20:45:54.675178 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:54 crc kubenswrapper[4793]: I0127 20:45:54.788968 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d3b109d-c597-40af-a65a-2554626509fc-utilities\") pod \"9d3b109d-c597-40af-a65a-2554626509fc\" (UID: \"9d3b109d-c597-40af-a65a-2554626509fc\") " Jan 27 20:45:54 crc kubenswrapper[4793]: I0127 20:45:54.789160 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l26zb\" (UniqueName: \"kubernetes.io/projected/9d3b109d-c597-40af-a65a-2554626509fc-kube-api-access-l26zb\") pod \"9d3b109d-c597-40af-a65a-2554626509fc\" (UID: \"9d3b109d-c597-40af-a65a-2554626509fc\") " Jan 27 20:45:54 crc kubenswrapper[4793]: I0127 20:45:54.789238 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d3b109d-c597-40af-a65a-2554626509fc-catalog-content\") pod \"9d3b109d-c597-40af-a65a-2554626509fc\" (UID: \"9d3b109d-c597-40af-a65a-2554626509fc\") " Jan 27 20:45:54 crc kubenswrapper[4793]: I0127 20:45:54.789956 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d3b109d-c597-40af-a65a-2554626509fc-utilities" (OuterVolumeSpecName: "utilities") pod "9d3b109d-c597-40af-a65a-2554626509fc" (UID: "9d3b109d-c597-40af-a65a-2554626509fc"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:45:54 crc kubenswrapper[4793]: I0127 20:45:54.811467 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d3b109d-c597-40af-a65a-2554626509fc-kube-api-access-l26zb" (OuterVolumeSpecName: "kube-api-access-l26zb") pod "9d3b109d-c597-40af-a65a-2554626509fc" (UID: "9d3b109d-c597-40af-a65a-2554626509fc"). InnerVolumeSpecName "kube-api-access-l26zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:45:54 crc kubenswrapper[4793]: I0127 20:45:54.846036 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d3b109d-c597-40af-a65a-2554626509fc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9d3b109d-c597-40af-a65a-2554626509fc" (UID: "9d3b109d-c597-40af-a65a-2554626509fc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:45:54 crc kubenswrapper[4793]: I0127 20:45:54.891811 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d3b109d-c597-40af-a65a-2554626509fc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:45:54 crc kubenswrapper[4793]: I0127 20:45:54.891858 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d3b109d-c597-40af-a65a-2554626509fc-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:45:54 crc kubenswrapper[4793]: I0127 20:45:54.891871 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l26zb\" (UniqueName: \"kubernetes.io/projected/9d3b109d-c597-40af-a65a-2554626509fc-kube-api-access-l26zb\") on node \"crc\" DevicePath \"\"" Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.177087 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdx29" event={"ID":"2a39b9f6-af38-406f-8d7a-ed03d3ca476e","Type":"ContainerStarted","Data":"e1e972a65a8d89cce729e0a38b165ab9743f4210b9305bb9206bafbabfcfe720"} Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.179214 4793 generic.go:334] "Generic (PLEG): container finished" podID="9d3b109d-c597-40af-a65a-2554626509fc" containerID="a9205f989a7e1cd8c6284f9557475c40556b140b9b32d22e642c18e735477fd3" exitCode=0 Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.179263 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksktt" event={"ID":"9d3b109d-c597-40af-a65a-2554626509fc","Type":"ContainerDied","Data":"a9205f989a7e1cd8c6284f9557475c40556b140b9b32d22e642c18e735477fd3"} Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.179296 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksktt" event={"ID":"9d3b109d-c597-40af-a65a-2554626509fc","Type":"ContainerDied","Data":"9d115363fb091fff16cb3bfc1b438ce3604fd871c9d9cf6a8ba68a5ee60fc8d9"} Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.179314 4793 scope.go:117] "RemoveContainer" containerID="a9205f989a7e1cd8c6284f9557475c40556b140b9b32d22e642c18e735477fd3" Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.179336 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ksktt" Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.210091 4793 scope.go:117] "RemoveContainer" containerID="e76c423eb0edc98dece1c61d930121cb2268fb1b68507b7a61cf831a83f0e48d" Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.211639 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-sdx29" podStartSLOduration=3.6976675329999997 podStartE2EDuration="10.211615646s" podCreationTimestamp="2026-01-27 20:45:45 +0000 UTC" firstStartedPulling="2026-01-27 20:45:47.482151037 +0000 UTC m=+2572.872404193" lastFinishedPulling="2026-01-27 20:45:53.99609915 +0000 UTC m=+2579.386352306" observedRunningTime="2026-01-27 20:45:55.200963253 +0000 UTC m=+2580.591216409" watchObservedRunningTime="2026-01-27 20:45:55.211615646 +0000 UTC m=+2580.601868802" Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.226654 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ksktt"] Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.237953 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ksktt"] Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.241333 4793 scope.go:117] "RemoveContainer" containerID="4954b6ef633fd69ac0f96ccd3ce6ca452426137e7ff91806ebe228fbd68e54cf" Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.292864 4793 scope.go:117] "RemoveContainer" containerID="a9205f989a7e1cd8c6284f9557475c40556b140b9b32d22e642c18e735477fd3" Jan 27 20:45:55 crc kubenswrapper[4793]: E0127 20:45:55.293529 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9205f989a7e1cd8c6284f9557475c40556b140b9b32d22e642c18e735477fd3\": container with ID starting with a9205f989a7e1cd8c6284f9557475c40556b140b9b32d22e642c18e735477fd3 not found: ID does not exist" containerID="a9205f989a7e1cd8c6284f9557475c40556b140b9b32d22e642c18e735477fd3" Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.293701 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9205f989a7e1cd8c6284f9557475c40556b140b9b32d22e642c18e735477fd3"} err="failed to get container status \"a9205f989a7e1cd8c6284f9557475c40556b140b9b32d22e642c18e735477fd3\": rpc error: code = NotFound desc = could not find container \"a9205f989a7e1cd8c6284f9557475c40556b140b9b32d22e642c18e735477fd3\": container with ID starting with a9205f989a7e1cd8c6284f9557475c40556b140b9b32d22e642c18e735477fd3 not found: ID does not exist" Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.293805 4793 scope.go:117] "RemoveContainer" containerID="e76c423eb0edc98dece1c61d930121cb2268fb1b68507b7a61cf831a83f0e48d" Jan 27 20:45:55 crc kubenswrapper[4793]: E0127 20:45:55.297726 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e76c423eb0edc98dece1c61d930121cb2268fb1b68507b7a61cf831a83f0e48d\": container with ID starting with e76c423eb0edc98dece1c61d930121cb2268fb1b68507b7a61cf831a83f0e48d not found: ID does not exist" containerID="e76c423eb0edc98dece1c61d930121cb2268fb1b68507b7a61cf831a83f0e48d" Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.297778 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e76c423eb0edc98dece1c61d930121cb2268fb1b68507b7a61cf831a83f0e48d"} err="failed to get 
container status \"e76c423eb0edc98dece1c61d930121cb2268fb1b68507b7a61cf831a83f0e48d\": rpc error: code = NotFound desc = could not find container \"e76c423eb0edc98dece1c61d930121cb2268fb1b68507b7a61cf831a83f0e48d\": container with ID starting with e76c423eb0edc98dece1c61d930121cb2268fb1b68507b7a61cf831a83f0e48d not found: ID does not exist" Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.297808 4793 scope.go:117] "RemoveContainer" containerID="4954b6ef633fd69ac0f96ccd3ce6ca452426137e7ff91806ebe228fbd68e54cf" Jan 27 20:45:55 crc kubenswrapper[4793]: E0127 20:45:55.298532 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4954b6ef633fd69ac0f96ccd3ce6ca452426137e7ff91806ebe228fbd68e54cf\": container with ID starting with 4954b6ef633fd69ac0f96ccd3ce6ca452426137e7ff91806ebe228fbd68e54cf not found: ID does not exist" containerID="4954b6ef633fd69ac0f96ccd3ce6ca452426137e7ff91806ebe228fbd68e54cf" Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.298604 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4954b6ef633fd69ac0f96ccd3ce6ca452426137e7ff91806ebe228fbd68e54cf"} err="failed to get container status \"4954b6ef633fd69ac0f96ccd3ce6ca452426137e7ff91806ebe228fbd68e54cf\": rpc error: code = NotFound desc = could not find container \"4954b6ef633fd69ac0f96ccd3ce6ca452426137e7ff91806ebe228fbd68e54cf\": container with ID starting with 4954b6ef633fd69ac0f96ccd3ce6ca452426137e7ff91806ebe228fbd68e54cf not found: ID does not exist" Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.812241 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298" Jan 27 20:45:55 crc kubenswrapper[4793]: E0127 20:45:55.812929 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:45:55 crc kubenswrapper[4793]: I0127 20:45:55.818102 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d3b109d-c597-40af-a65a-2554626509fc" path="/var/lib/kubelet/pods/9d3b109d-c597-40af-a65a-2554626509fc/volumes" Jan 27 20:45:56 crc kubenswrapper[4793]: I0127 20:45:56.185972 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-sdx29" Jan 27 20:45:56 crc kubenswrapper[4793]: I0127 20:45:56.186035 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-sdx29" Jan 27 20:45:56 crc kubenswrapper[4793]: I0127 20:45:56.803992 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" Jan 27 20:45:56 crc kubenswrapper[4793]: E0127 20:45:56.804735 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:45:57 crc kubenswrapper[4793]: I0127 20:45:57.233410 4793 prober.go:107] 
"Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-sdx29" podUID="2a39b9f6-af38-406f-8d7a-ed03d3ca476e" containerName="registry-server" probeResult="failure" output=< Jan 27 20:45:57 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s Jan 27 20:45:57 crc kubenswrapper[4793]: > Jan 27 20:46:06 crc kubenswrapper[4793]: I0127 20:46:06.233971 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-sdx29" Jan 27 20:46:06 crc kubenswrapper[4793]: I0127 20:46:06.285182 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-sdx29" Jan 27 20:46:06 crc kubenswrapper[4793]: I0127 20:46:06.469361 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-sdx29"] Jan 27 20:46:07 crc kubenswrapper[4793]: I0127 20:46:07.289068 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-sdx29" podUID="2a39b9f6-af38-406f-8d7a-ed03d3ca476e" containerName="registry-server" containerID="cri-o://e1e972a65a8d89cce729e0a38b165ab9743f4210b9305bb9206bafbabfcfe720" gracePeriod=2 Jan 27 20:46:07 crc kubenswrapper[4793]: I0127 20:46:07.719313 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sdx29" Jan 27 20:46:07 crc kubenswrapper[4793]: I0127 20:46:07.728632 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-catalog-content\") pod \"2a39b9f6-af38-406f-8d7a-ed03d3ca476e\" (UID: \"2a39b9f6-af38-406f-8d7a-ed03d3ca476e\") " Jan 27 20:46:07 crc kubenswrapper[4793]: I0127 20:46:07.728677 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-utilities\") pod \"2a39b9f6-af38-406f-8d7a-ed03d3ca476e\" (UID: \"2a39b9f6-af38-406f-8d7a-ed03d3ca476e\") " Jan 27 20:46:07 crc kubenswrapper[4793]: I0127 20:46:07.728747 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6mkd5\" (UniqueName: \"kubernetes.io/projected/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-kube-api-access-6mkd5\") pod \"2a39b9f6-af38-406f-8d7a-ed03d3ca476e\" (UID: \"2a39b9f6-af38-406f-8d7a-ed03d3ca476e\") " Jan 27 20:46:07 crc kubenswrapper[4793]: I0127 20:46:07.730326 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-utilities" (OuterVolumeSpecName: "utilities") pod "2a39b9f6-af38-406f-8d7a-ed03d3ca476e" (UID: "2a39b9f6-af38-406f-8d7a-ed03d3ca476e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:46:07 crc kubenswrapper[4793]: I0127 20:46:07.737402 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-kube-api-access-6mkd5" (OuterVolumeSpecName: "kube-api-access-6mkd5") pod "2a39b9f6-af38-406f-8d7a-ed03d3ca476e" (UID: "2a39b9f6-af38-406f-8d7a-ed03d3ca476e"). InnerVolumeSpecName "kube-api-access-6mkd5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.094170 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.094203 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6mkd5\" (UniqueName: \"kubernetes.io/projected/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-kube-api-access-6mkd5\") on node \"crc\" DevicePath \"\"" Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.190689 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2a39b9f6-af38-406f-8d7a-ed03d3ca476e" (UID: "2a39b9f6-af38-406f-8d7a-ed03d3ca476e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.196931 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a39b9f6-af38-406f-8d7a-ed03d3ca476e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.302241 4793 generic.go:334] "Generic (PLEG): container finished" podID="2a39b9f6-af38-406f-8d7a-ed03d3ca476e" containerID="e1e972a65a8d89cce729e0a38b165ab9743f4210b9305bb9206bafbabfcfe720" exitCode=0 Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.302307 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdx29" event={"ID":"2a39b9f6-af38-406f-8d7a-ed03d3ca476e","Type":"ContainerDied","Data":"e1e972a65a8d89cce729e0a38b165ab9743f4210b9305bb9206bafbabfcfe720"} Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.302338 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-sdx29"
Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.302361 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdx29" event={"ID":"2a39b9f6-af38-406f-8d7a-ed03d3ca476e","Type":"ContainerDied","Data":"be9d08e3181d99c32c49976f4d0e7a0fd3df18c527e31df546fc263a90277760"}
Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.302383 4793 scope.go:117] "RemoveContainer" containerID="e1e972a65a8d89cce729e0a38b165ab9743f4210b9305bb9206bafbabfcfe720"
Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.345290 4793 scope.go:117] "RemoveContainer" containerID="df2d69863dd9604a1dceab64ae43def30118b6565a3024294479a16c2e7e8d53"
Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.351035 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-sdx29"]
Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.362476 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-sdx29"]
Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.381121 4793 scope.go:117] "RemoveContainer" containerID="dbcdb9f6ebb0ffca99513f37fc7401d4b6c5061fca9cfd1c2a70312915032ffe"
Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.429037 4793 scope.go:117] "RemoveContainer" containerID="e1e972a65a8d89cce729e0a38b165ab9743f4210b9305bb9206bafbabfcfe720"
Jan 27 20:46:08 crc kubenswrapper[4793]: E0127 20:46:08.429641 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1e972a65a8d89cce729e0a38b165ab9743f4210b9305bb9206bafbabfcfe720\": container with ID starting with e1e972a65a8d89cce729e0a38b165ab9743f4210b9305bb9206bafbabfcfe720 not found: ID does not exist" containerID="e1e972a65a8d89cce729e0a38b165ab9743f4210b9305bb9206bafbabfcfe720"
Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.429691 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1e972a65a8d89cce729e0a38b165ab9743f4210b9305bb9206bafbabfcfe720"} err="failed to get container status \"e1e972a65a8d89cce729e0a38b165ab9743f4210b9305bb9206bafbabfcfe720\": rpc error: code = NotFound desc = could not find container \"e1e972a65a8d89cce729e0a38b165ab9743f4210b9305bb9206bafbabfcfe720\": container with ID starting with e1e972a65a8d89cce729e0a38b165ab9743f4210b9305bb9206bafbabfcfe720 not found: ID does not exist"
Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.429718 4793 scope.go:117] "RemoveContainer" containerID="df2d69863dd9604a1dceab64ae43def30118b6565a3024294479a16c2e7e8d53"
Jan 27 20:46:08 crc kubenswrapper[4793]: E0127 20:46:08.430147 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df2d69863dd9604a1dceab64ae43def30118b6565a3024294479a16c2e7e8d53\": container with ID starting with df2d69863dd9604a1dceab64ae43def30118b6565a3024294479a16c2e7e8d53 not found: ID does not exist" containerID="df2d69863dd9604a1dceab64ae43def30118b6565a3024294479a16c2e7e8d53"
Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.430176 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df2d69863dd9604a1dceab64ae43def30118b6565a3024294479a16c2e7e8d53"} err="failed to get container status \"df2d69863dd9604a1dceab64ae43def30118b6565a3024294479a16c2e7e8d53\": rpc error: code = NotFound desc = could not find container \"df2d69863dd9604a1dceab64ae43def30118b6565a3024294479a16c2e7e8d53\": container with ID starting with df2d69863dd9604a1dceab64ae43def30118b6565a3024294479a16c2e7e8d53 not found: ID does not exist"
Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.430191 4793 scope.go:117] "RemoveContainer" containerID="dbcdb9f6ebb0ffca99513f37fc7401d4b6c5061fca9cfd1c2a70312915032ffe"
Jan 27 20:46:08 crc kubenswrapper[4793]: E0127 20:46:08.430507 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbcdb9f6ebb0ffca99513f37fc7401d4b6c5061fca9cfd1c2a70312915032ffe\": container with ID starting with dbcdb9f6ebb0ffca99513f37fc7401d4b6c5061fca9cfd1c2a70312915032ffe not found: ID does not exist" containerID="dbcdb9f6ebb0ffca99513f37fc7401d4b6c5061fca9cfd1c2a70312915032ffe"
Jan 27 20:46:08 crc kubenswrapper[4793]: I0127 20:46:08.430576 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbcdb9f6ebb0ffca99513f37fc7401d4b6c5061fca9cfd1c2a70312915032ffe"} err="failed to get container status \"dbcdb9f6ebb0ffca99513f37fc7401d4b6c5061fca9cfd1c2a70312915032ffe\": rpc error: code = NotFound desc = could not find container \"dbcdb9f6ebb0ffca99513f37fc7401d4b6c5061fca9cfd1c2a70312915032ffe\": container with ID starting with dbcdb9f6ebb0ffca99513f37fc7401d4b6c5061fca9cfd1c2a70312915032ffe not found: ID does not exist"
Jan 27 20:46:09 crc kubenswrapper[4793]: I0127 20:46:09.803587 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f"
Jan 27 20:46:09 crc kubenswrapper[4793]: I0127 20:46:09.804015 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298"
Jan 27 20:46:09 crc kubenswrapper[4793]: E0127 20:46:09.804186 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:46:09 crc kubenswrapper[4793]: E0127 20:46:09.804197 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:46:09 crc kubenswrapper[4793]: I0127 20:46:09.813859 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a39b9f6-af38-406f-8d7a-ed03d3ca476e" path="/var/lib/kubelet/pods/2a39b9f6-af38-406f-8d7a-ed03d3ca476e/volumes"
Jan 27 20:46:22 crc kubenswrapper[4793]: I0127 20:46:22.803153 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298"
Jan 27 20:46:22 crc kubenswrapper[4793]: E0127 20:46:22.804040 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:46:24 crc kubenswrapper[4793]: I0127 20:46:24.803682 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f"
Jan 27 20:46:24 crc kubenswrapper[4793]: E0127 20:46:24.806067 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:46:35 crc kubenswrapper[4793]: I0127 20:46:35.811385 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f"
Jan 27 20:46:35 crc kubenswrapper[4793]: E0127 20:46:35.812572 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:46:37 crc kubenswrapper[4793]: I0127 20:46:37.804114 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298"
Jan 27 20:46:37 crc kubenswrapper[4793]: E0127 20:46:37.804918 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:46:48 crc kubenswrapper[4793]: I0127 20:46:48.803775 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298"
Jan 27 20:46:48 crc kubenswrapper[4793]: E0127 20:46:48.804731 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:46:50 crc kubenswrapper[4793]: I0127 20:46:50.803323 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f"
Jan 27 20:46:50 crc kubenswrapper[4793]: E0127 20:46:50.803866 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:46:59 crc kubenswrapper[4793]: I0127 20:46:59.804045 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298"
Jan 27 20:46:59 crc kubenswrapper[4793]: E0127 20:46:59.804828 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:47:01 crc kubenswrapper[4793]: I0127 20:47:01.803609 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f"
Jan 27 20:47:01 crc kubenswrapper[4793]: E0127 20:47:01.804227 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:47:12 crc kubenswrapper[4793]: I0127 20:47:12.804540 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298"
Jan 27 20:47:12 crc kubenswrapper[4793]: E0127 20:47:12.805535 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:47:12 crc kubenswrapper[4793]: I0127 20:47:12.805759 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f"
Jan 27 20:47:12 crc kubenswrapper[4793]: E0127 20:47:12.805993 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:47:23 crc kubenswrapper[4793]: I0127 20:47:23.804097 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298"
Jan 27 20:47:23 crc kubenswrapper[4793]: E0127 20:47:23.805017 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:47:27 crc kubenswrapper[4793]: I0127 20:47:27.804138 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f"
Jan 27 20:47:27 crc kubenswrapper[4793]: E0127 20:47:27.805046 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:47:36 crc kubenswrapper[4793]: I0127 20:47:36.803655 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298"
Jan 27 20:47:36 crc kubenswrapper[4793]: E0127 20:47:36.804610 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:47:40 crc kubenswrapper[4793]: I0127 20:47:40.803132 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f"
Jan 27 20:47:40 crc kubenswrapper[4793]: E0127 20:47:40.805108 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:47:49 crc kubenswrapper[4793]: I0127 20:47:49.810293 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298"
Jan 27 20:47:49 crc kubenswrapper[4793]: E0127 20:47:49.811083 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:47:52 crc kubenswrapper[4793]: I0127 20:47:52.803976 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f"
Jan 27 20:47:53 crc kubenswrapper[4793]: I0127 20:47:53.374803 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"2beb845167fda1987a99f50b0f2e2d57c906953c6b58627f38b3a649c67041ad"}
Jan 27 20:48:03 crc kubenswrapper[4793]: I0127 20:48:03.805077 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298"
Jan 27 20:48:03 crc kubenswrapper[4793]: E0127 20:48:03.806393 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:48:15 crc kubenswrapper[4793]: I0127 20:48:15.812465 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298"
Jan 27 20:48:16 crc kubenswrapper[4793]: I0127 20:48:16.346314 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542"}
Jan 27 20:48:18 crc kubenswrapper[4793]: I0127 20:48:18.242751 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 27 20:48:18 crc kubenswrapper[4793]: I0127 20:48:18.243078 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 20:48:18 crc kubenswrapper[4793]: I0127 20:48:18.270062 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0"
Jan 27 20:48:18 crc kubenswrapper[4793]: I0127 20:48:18.797584 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0"
Jan 27 20:48:19 crc kubenswrapper[4793]: I0127 20:48:19.756946 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" exitCode=1
Jan 27 20:48:19 crc kubenswrapper[4793]: I0127 20:48:19.756992 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542"}
Jan 27 20:48:19 crc kubenswrapper[4793]: I0127 20:48:19.757042 4793 scope.go:117] "RemoveContainer" containerID="fd51c5086a67f03b67c573a8080e9cdfebbc1b39538ac03194489f72d09a4298"
Jan 27 20:48:19 crc kubenswrapper[4793]: I0127 20:48:19.757746 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542"
Jan 27 20:48:19 crc kubenswrapper[4793]: E0127 20:48:19.758097 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:48:20 crc kubenswrapper[4793]: I0127 20:48:20.767773 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542"
Jan 27 20:48:20 crc kubenswrapper[4793]: E0127 20:48:20.768452 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:48:23 crc kubenswrapper[4793]: I0127 20:48:23.242459 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 27 20:48:23 crc kubenswrapper[4793]: I0127 20:48:23.243756 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542"
Jan 27 20:48:23 crc kubenswrapper[4793]: E0127 20:48:23.244140 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:48:28 crc kubenswrapper[4793]: I0127 20:48:28.242847 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 20:48:28 crc kubenswrapper[4793]: I0127 20:48:28.243379 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 20:48:28 crc kubenswrapper[4793]: I0127 20:48:28.244259 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542"
Jan 27 20:48:28 crc kubenswrapper[4793]: E0127 20:48:28.244572 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:48:29 crc kubenswrapper[4793]: I0127 20:48:29.679996 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 27 20:48:31 crc kubenswrapper[4793]: I0127 20:48:31.098990 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 27 20:48:34 crc kubenswrapper[4793]: I0127 20:48:34.533738 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" containerName="rabbitmq" containerID="cri-o://642c4da046cb49592c2dbdb5ea15fec53c4d366602f78d9dd9a9adc6392c53a3" gracePeriod=604796
Jan 27 20:48:34 crc kubenswrapper[4793]: I0127 20:48:34.578411 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.104:5671: connect: connection refused"
Jan 27 20:48:35 crc kubenswrapper[4793]: I0127 20:48:35.091699 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="be1afc99-1852-4e3b-a2e7-e9beab138334" containerName="rabbitmq" containerID="cri-o://43a2774eee9b2655c70f1d1d671e39de4b5a1753a8c3c45cdec6a6d4dcf1f495" gracePeriod=604797
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.107955 4793 generic.go:334] "Generic (PLEG): container finished" podID="986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" containerID="642c4da046cb49592c2dbdb5ea15fec53c4d366602f78d9dd9a9adc6392c53a3" exitCode=0
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.108281 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7","Type":"ContainerDied","Data":"642c4da046cb49592c2dbdb5ea15fec53c4d366602f78d9dd9a9adc6392c53a3"}
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.108336 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7","Type":"ContainerDied","Data":"fb17f389b32917d7d96e65262b1065b09823ad457d4d8a304db978269f914b3e"}
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.108346 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fb17f389b32917d7d96e65262b1065b09823ad457d4d8a304db978269f914b3e"
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.191004 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.345291 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-tls\") pod \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.345370 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.345577 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-erlang-cookie\") pod \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.345618 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-pod-info\") pod \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.345639 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-plugins-conf\") pod \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.345666 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4s6f\" (UniqueName: \"kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-kube-api-access-q4s6f\") pod \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.345698 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-config-data\") pod \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.345719 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-erlang-cookie-secret\") pod \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.345799 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-plugins\") pod \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.345850 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-server-conf\") pod \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.345907 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-confd\") pod \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\" (UID: \"986a3fd9-9573-46d3-a71d-b3fbe5fd87f7\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.349192 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" (UID: "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.349358 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" (UID: "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.351144 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" (UID: "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.359015 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-pod-info" (OuterVolumeSpecName: "pod-info") pod "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" (UID: "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.361756 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" (UID: "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.362173 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-kube-api-access-q4s6f" (OuterVolumeSpecName: "kube-api-access-q4s6f") pod "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" (UID: "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7"). InnerVolumeSpecName "kube-api-access-q4s6f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.362719 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" (UID: "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.379534 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" (UID: "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.448085 4793 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.448153 4793 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.448163 4793 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.448180 4793 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-pod-info\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.448189 4793 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-plugins-conf\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.448197 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4s6f\" (UniqueName: \"kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-kube-api-access-q4s6f\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.448208 4793 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.448216 4793 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.456845 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-config-data" (OuterVolumeSpecName: "config-data") pod "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" (UID: "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.467365 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-server-conf" (OuterVolumeSpecName: "server-conf") pod "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" (UID: "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.469727 4793 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc"
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.520502 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" (UID: "986a3fd9-9573-46d3-a71d-b3fbe5fd87f7"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.549787 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.550094 4793 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-server-conf\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.550104 4793 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.550116 4793 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.683484 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.856835 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-confd\") pod \"be1afc99-1852-4e3b-a2e7-e9beab138334\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.856888 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-server-conf\") pod \"be1afc99-1852-4e3b-a2e7-e9beab138334\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.856938 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjjfq\" (UniqueName: \"kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-kube-api-access-fjjfq\") pod \"be1afc99-1852-4e3b-a2e7-e9beab138334\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.856971 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/be1afc99-1852-4e3b-a2e7-e9beab138334-pod-info\") pod \"be1afc99-1852-4e3b-a2e7-e9beab138334\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.857001 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-tls\") pod \"be1afc99-1852-4e3b-a2e7-e9beab138334\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.857027 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-config-data\") pod \"be1afc99-1852-4e3b-a2e7-e9beab138334\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.857058 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-plugins-conf\") pod \"be1afc99-1852-4e3b-a2e7-e9beab138334\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.857138 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"be1afc99-1852-4e3b-a2e7-e9beab138334\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.857181 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-plugins\") pod \"be1afc99-1852-4e3b-a2e7-e9beab138334\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.857245 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-erlang-cookie\") pod \"be1afc99-1852-4e3b-a2e7-e9beab138334\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.857286 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/be1afc99-1852-4e3b-a2e7-e9beab138334-erlang-cookie-secret\") pod \"be1afc99-1852-4e3b-a2e7-e9beab138334\" (UID: \"be1afc99-1852-4e3b-a2e7-e9beab138334\") "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.858646 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "be1afc99-1852-4e3b-a2e7-e9beab138334" (UID: "be1afc99-1852-4e3b-a2e7-e9beab138334"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.858684 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "be1afc99-1852-4e3b-a2e7-e9beab138334" (UID: "be1afc99-1852-4e3b-a2e7-e9beab138334"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.858721 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "be1afc99-1852-4e3b-a2e7-e9beab138334" (UID: "be1afc99-1852-4e3b-a2e7-e9beab138334"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.863572 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "persistence") pod "be1afc99-1852-4e3b-a2e7-e9beab138334" (UID: "be1afc99-1852-4e3b-a2e7-e9beab138334"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.864177 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/be1afc99-1852-4e3b-a2e7-e9beab138334-pod-info" (OuterVolumeSpecName: "pod-info") pod "be1afc99-1852-4e3b-a2e7-e9beab138334" (UID: "be1afc99-1852-4e3b-a2e7-e9beab138334"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.866690 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be1afc99-1852-4e3b-a2e7-e9beab138334-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "be1afc99-1852-4e3b-a2e7-e9beab138334" (UID: "be1afc99-1852-4e3b-a2e7-e9beab138334"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.866787 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-kube-api-access-fjjfq" (OuterVolumeSpecName: "kube-api-access-fjjfq") pod "be1afc99-1852-4e3b-a2e7-e9beab138334" (UID: "be1afc99-1852-4e3b-a2e7-e9beab138334"). InnerVolumeSpecName "kube-api-access-fjjfq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.874477 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "be1afc99-1852-4e3b-a2e7-e9beab138334" (UID: "be1afc99-1852-4e3b-a2e7-e9beab138334"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.906882 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-config-data" (OuterVolumeSpecName: "config-data") pod "be1afc99-1852-4e3b-a2e7-e9beab138334" (UID: "be1afc99-1852-4e3b-a2e7-e9beab138334"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.930178 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-server-conf" (OuterVolumeSpecName: "server-conf") pod "be1afc99-1852-4e3b-a2e7-e9beab138334" (UID: "be1afc99-1852-4e3b-a2e7-e9beab138334"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.959867 4793 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.959900 4793 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.959935 4793 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/be1afc99-1852-4e3b-a2e7-e9beab138334-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.959945 4793 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-server-conf\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.959954 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjjfq\" (UniqueName: \"kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-kube-api-access-fjjfq\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.959962 4793 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/be1afc99-1852-4e3b-a2e7-e9beab138334-pod-info\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.959971 4793 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.959979 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.960009 4793 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/be1afc99-1852-4e3b-a2e7-e9beab138334-plugins-conf\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.960040 4793 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" "
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.987419 4793 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc"
Jan 27 20:48:36 crc kubenswrapper[4793]: I0127 20:48:36.997885 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "be1afc99-1852-4e3b-a2e7-e9beab138334" (UID: "be1afc99-1852-4e3b-a2e7-e9beab138334"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.064642 4793 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/be1afc99-1852-4e3b-a2e7-e9beab138334-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.064685 4793 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\""
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.125069 4793 generic.go:334] "Generic (PLEG): container finished" podID="be1afc99-1852-4e3b-a2e7-e9beab138334" containerID="43a2774eee9b2655c70f1d1d671e39de4b5a1753a8c3c45cdec6a6d4dcf1f495" exitCode=0
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.125247 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.129538 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.129566 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"be1afc99-1852-4e3b-a2e7-e9beab138334","Type":"ContainerDied","Data":"43a2774eee9b2655c70f1d1d671e39de4b5a1753a8c3c45cdec6a6d4dcf1f495"}
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.129899 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"be1afc99-1852-4e3b-a2e7-e9beab138334","Type":"ContainerDied","Data":"38b0f4e57647aeced9bf653adf9e87972c42db43af04247addd23e43ac07fe02"}
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.130252 4793 scope.go:117] "RemoveContainer" containerID="43a2774eee9b2655c70f1d1d671e39de4b5a1753a8c3c45cdec6a6d4dcf1f495"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.160981 4793 scope.go:117] "RemoveContainer" containerID="c6abbef47f67e638090a5c00620726d933ca1e66b204ba6c8fb3d01433ffb8aa"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.194209 4793 scope.go:117] "RemoveContainer" containerID="43a2774eee9b2655c70f1d1d671e39de4b5a1753a8c3c45cdec6a6d4dcf1f495"
Jan 27 20:48:37 crc kubenswrapper[4793]: E0127 20:48:37.194828 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"43a2774eee9b2655c70f1d1d671e39de4b5a1753a8c3c45cdec6a6d4dcf1f495\": container with ID starting with 43a2774eee9b2655c70f1d1d671e39de4b5a1753a8c3c45cdec6a6d4dcf1f495 not found: ID does not exist" containerID="43a2774eee9b2655c70f1d1d671e39de4b5a1753a8c3c45cdec6a6d4dcf1f495"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.194872 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43a2774eee9b2655c70f1d1d671e39de4b5a1753a8c3c45cdec6a6d4dcf1f495"} err="failed to get container status \"43a2774eee9b2655c70f1d1d671e39de4b5a1753a8c3c45cdec6a6d4dcf1f495\": rpc error: code = NotFound desc = could not find container \"43a2774eee9b2655c70f1d1d671e39de4b5a1753a8c3c45cdec6a6d4dcf1f495\": container with ID starting with 43a2774eee9b2655c70f1d1d671e39de4b5a1753a8c3c45cdec6a6d4dcf1f495 not found: ID does not exist"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.194902 4793 scope.go:117] "RemoveContainer" containerID="c6abbef47f67e638090a5c00620726d933ca1e66b204ba6c8fb3d01433ffb8aa"
Jan 27 20:48:37 crc kubenswrapper[4793]: E0127 20:48:37.195218 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6abbef47f67e638090a5c00620726d933ca1e66b204ba6c8fb3d01433ffb8aa\": container with ID starting with c6abbef47f67e638090a5c00620726d933ca1e66b204ba6c8fb3d01433ffb8aa not found: ID does not exist" containerID="c6abbef47f67e638090a5c00620726d933ca1e66b204ba6c8fb3d01433ffb8aa"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.195255 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6abbef47f67e638090a5c00620726d933ca1e66b204ba6c8fb3d01433ffb8aa"} err="failed to get container status \"c6abbef47f67e638090a5c00620726d933ca1e66b204ba6c8fb3d01433ffb8aa\": rpc error: code = NotFound desc = could not find container \"c6abbef47f67e638090a5c00620726d933ca1e66b204ba6c8fb3d01433ffb8aa\": container with ID starting with c6abbef47f67e638090a5c00620726d933ca1e66b204ba6c8fb3d01433ffb8aa not found: ID does not exist"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.198712 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.232696 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.244134 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.254354 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.264247 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 27 20:48:37 crc kubenswrapper[4793]: E0127 20:48:37.264950 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" containerName="rabbitmq"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.264972 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" containerName="rabbitmq"
Jan 27 20:48:37 crc kubenswrapper[4793]: E0127 20:48:37.264994 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be1afc99-1852-4e3b-a2e7-e9beab138334" containerName="rabbitmq"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.265000 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="be1afc99-1852-4e3b-a2e7-e9beab138334" containerName="rabbitmq"
Jan 27 20:48:37 crc kubenswrapper[4793]: E0127 20:48:37.265013 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" containerName="setup-container"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.265024 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" containerName="setup-container"
Jan 27 20:48:37 crc kubenswrapper[4793]: E0127 20:48:37.265035 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d3b109d-c597-40af-a65a-2554626509fc" containerName="extract-content"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.265041 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d3b109d-c597-40af-a65a-2554626509fc" containerName="extract-content"
Jan 27 20:48:37 crc kubenswrapper[4793]: E0127 20:48:37.265052 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d3b109d-c597-40af-a65a-2554626509fc" containerName="registry-server"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.265057 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d3b109d-c597-40af-a65a-2554626509fc" containerName="registry-server"
Jan 27 20:48:37 crc kubenswrapper[4793]: E0127 20:48:37.265070 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a39b9f6-af38-406f-8d7a-ed03d3ca476e" containerName="extract-utilities"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.265076 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a39b9f6-af38-406f-8d7a-ed03d3ca476e" containerName="extract-utilities"
Jan 27 20:48:37 crc kubenswrapper[4793]: E0127 20:48:37.265100 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a39b9f6-af38-406f-8d7a-ed03d3ca476e" containerName="registry-server"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.265106 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a39b9f6-af38-406f-8d7a-ed03d3ca476e" containerName="registry-server"
Jan 27 20:48:37 crc kubenswrapper[4793]: E0127 20:48:37.265118 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be1afc99-1852-4e3b-a2e7-e9beab138334" containerName="setup-container"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.265123 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="be1afc99-1852-4e3b-a2e7-e9beab138334" containerName="setup-container"
Jan 27 20:48:37 crc kubenswrapper[4793]: E0127 20:48:37.265135 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a39b9f6-af38-406f-8d7a-ed03d3ca476e" containerName="extract-content"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.265141 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a39b9f6-af38-406f-8d7a-ed03d3ca476e" containerName="extract-content"
Jan 27 20:48:37 crc kubenswrapper[4793]: E0127 20:48:37.265151 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d3b109d-c597-40af-a65a-2554626509fc" containerName="extract-utilities"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.265157 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d3b109d-c597-40af-a65a-2554626509fc" containerName="extract-utilities"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.265357 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d3b109d-c597-40af-a65a-2554626509fc" containerName="registry-server"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.265385 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a39b9f6-af38-406f-8d7a-ed03d3ca476e" containerName="registry-server"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.265406 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="be1afc99-1852-4e3b-a2e7-e9beab138334" containerName="rabbitmq"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.265414 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" containerName="rabbitmq"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.266662 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.280275 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.280604 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.281839 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.282076 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.282456 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.282658 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.283165 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-kg5s2"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.295605 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.315604 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.318508 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.329015 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.329291 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.329434 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-fp72z"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.329560 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.329703 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.329816 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.332207 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.340541 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.372159 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ddj8s\" (UniqueName: \"kubernetes.io/projected/6b5f0924-d10f-4e93-963c-de03d16f48c1-kube-api-access-ddj8s\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.372565 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6b5f0924-d10f-4e93-963c-de03d16f48c1-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.372604 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6b5f0924-d10f-4e93-963c-de03d16f48c1-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.372672 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6b5f0924-d10f-4e93-963c-de03d16f48c1-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.372696 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6b5f0924-d10f-4e93-963c-de03d16f48c1-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.372764 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6b5f0924-d10f-4e93-963c-de03d16f48c1-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.372786 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b5f0924-d10f-4e93-963c-de03d16f48c1-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.372823 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6b5f0924-d10f-4e93-963c-de03d16f48c1-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.372903 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.372938 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6b5f0924-d10f-4e93-963c-de03d16f48c1-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.372988 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6b5f0924-d10f-4e93-963c-de03d16f48c1-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.474896 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6b5f0924-d10f-4e93-963c-de03d16f48c1-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.474951 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6b5f0924-d10f-4e93-963c-de03d16f48c1-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.474980 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c4797dd0-4754-4037-983f-64d2aa1fa902-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475020 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c4797dd0-4754-4037-983f-64d2aa1fa902-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475064 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6b5f0924-d10f-4e93-963c-de03d16f48c1-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475179 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b5f0924-d10f-4e93-963c-de03d16f48c1-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475273 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6b5f0924-d10f-4e93-963c-de03d16f48c1-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475311 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c4797dd0-4754-4037-983f-64d2aa1fa902-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475336 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c4797dd0-4754-4037-983f-64d2aa1fa902-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475380 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmrhq\" (UniqueName: \"kubernetes.io/projected/c4797dd0-4754-4037-983f-64d2aa1fa902-kube-api-access-lmrhq\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475401 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6b5f0924-d10f-4e93-963c-de03d16f48c1-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475485 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475513 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c4797dd0-4754-4037-983f-64d2aa1fa902-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475606 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475642 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6b5f0924-d10f-4e93-963c-de03d16f48c1-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475712 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6b5f0924-d10f-4e93-963c-de03d16f48c1-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475794 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c4797dd0-4754-4037-983f-64d2aa1fa902-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475828 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ddj8s\" (UniqueName: 
\"kubernetes.io/projected/6b5f0924-d10f-4e93-963c-de03d16f48c1-kube-api-access-ddj8s\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475874 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c4797dd0-4754-4037-983f-64d2aa1fa902-config-data\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.475941 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c4797dd0-4754-4037-983f-64d2aa1fa902-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.476039 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6b5f0924-d10f-4e93-963c-de03d16f48c1-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.476153 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6b5f0924-d10f-4e93-963c-de03d16f48c1-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.476298 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c4797dd0-4754-4037-983f-64d2aa1fa902-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.476336 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6b5f0924-d10f-4e93-963c-de03d16f48c1-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.476367 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b5f0924-d10f-4e93-963c-de03d16f48c1-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.476308 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.476949 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6b5f0924-d10f-4e93-963c-de03d16f48c1-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.477566 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6b5f0924-d10f-4e93-963c-de03d16f48c1-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.481170 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6b5f0924-d10f-4e93-963c-de03d16f48c1-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.481625 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6b5f0924-d10f-4e93-963c-de03d16f48c1-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.482221 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6b5f0924-d10f-4e93-963c-de03d16f48c1-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.483118 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6b5f0924-d10f-4e93-963c-de03d16f48c1-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.502895 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ddj8s\" (UniqueName: \"kubernetes.io/projected/6b5f0924-d10f-4e93-963c-de03d16f48c1-kube-api-access-ddj8s\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.517507 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b5f0924-d10f-4e93-963c-de03d16f48c1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.578353 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c4797dd0-4754-4037-983f-64d2aa1fa902-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.578430 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c4797dd0-4754-4037-983f-64d2aa1fa902-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.578493 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" 
(UniqueName: \"kubernetes.io/empty-dir/c4797dd0-4754-4037-983f-64d2aa1fa902-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.578513 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c4797dd0-4754-4037-983f-64d2aa1fa902-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.578560 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmrhq\" (UniqueName: \"kubernetes.io/projected/c4797dd0-4754-4037-983f-64d2aa1fa902-kube-api-access-lmrhq\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.578615 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.578638 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c4797dd0-4754-4037-983f-64d2aa1fa902-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.578710 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c4797dd0-4754-4037-983f-64d2aa1fa902-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.578741 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c4797dd0-4754-4037-983f-64d2aa1fa902-config-data\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.578773 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c4797dd0-4754-4037-983f-64d2aa1fa902-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.578871 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c4797dd0-4754-4037-983f-64d2aa1fa902-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.579187 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c4797dd0-4754-4037-983f-64d2aa1fa902-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0" Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.579361 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.579502 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c4797dd0-4754-4037-983f-64d2aa1fa902-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.580040 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c4797dd0-4754-4037-983f-64d2aa1fa902-config-data\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.580213 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c4797dd0-4754-4037-983f-64d2aa1fa902-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.580298 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c4797dd0-4754-4037-983f-64d2aa1fa902-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.583288 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c4797dd0-4754-4037-983f-64d2aa1fa902-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.584059 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c4797dd0-4754-4037-983f-64d2aa1fa902-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.584498 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c4797dd0-4754-4037-983f-64d2aa1fa902-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.585238 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c4797dd0-4754-4037-983f-64d2aa1fa902-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.588737 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.598654 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmrhq\" (UniqueName: \"kubernetes.io/projected/c4797dd0-4754-4037-983f-64d2aa1fa902-kube-api-access-lmrhq\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.632523 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"c4797dd0-4754-4037-983f-64d2aa1fa902\") " pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.656035 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.821090 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="986a3fd9-9573-46d3-a71d-b3fbe5fd87f7" path="/var/lib/kubelet/pods/986a3fd9-9573-46d3-a71d-b3fbe5fd87f7/volumes"
Jan 27 20:48:37 crc kubenswrapper[4793]: I0127 20:48:37.823151 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be1afc99-1852-4e3b-a2e7-e9beab138334" path="/var/lib/kubelet/pods/be1afc99-1852-4e3b-a2e7-e9beab138334/volumes"
Jan 27 20:48:38 crc kubenswrapper[4793]: I0127 20:48:38.175014 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 27 20:48:38 crc kubenswrapper[4793]: I0127 20:48:38.270229 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 27 20:48:39 crc kubenswrapper[4793]: I0127 20:48:39.147170 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6b5f0924-d10f-4e93-963c-de03d16f48c1","Type":"ContainerStarted","Data":"7e879b465f21209f01633c95f119de619abdd07bb250ab91ef5c5412c576f9a7"}
Jan 27 20:48:39 crc kubenswrapper[4793]: I0127 20:48:39.148358 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c4797dd0-4754-4037-983f-64d2aa1fa902","Type":"ContainerStarted","Data":"981fc431b66866a1f36e014c196f475629b27a67a08313a8feebd018e21e4551"}
Jan 27 20:48:40 crc kubenswrapper[4793]: I0127 20:48:40.158773 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6b5f0924-d10f-4e93-963c-de03d16f48c1","Type":"ContainerStarted","Data":"f7ba1d9bd678a3fc2a79c89659023decbfd5f7a5a918d7e56fbc37fbcc02d959"}
Jan 27 20:48:40 crc kubenswrapper[4793]: I0127 20:48:40.161852 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c4797dd0-4754-4037-983f-64d2aa1fa902","Type":"ContainerStarted","Data":"5abdb1fedc87fe9592e0de28d8ac40c184473462cd56915e72a426335386d273"}
Jan 27 20:48:42 crc kubenswrapper[4793]: I0127 20:48:42.803250 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542"
Jan 27 20:48:42 crc kubenswrapper[4793]: E0127 20:48:42.803954 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.542133 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bbd76bc6c-qwhjr"]
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.544806 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.584945 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.587251 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bbd76bc6c-qwhjr"]
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.688430 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-dns-svc\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.688676 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-config\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.688700 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-ovsdbserver-nb\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.688859 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-ovsdbserver-sb\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.688889 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p66xg\" (UniqueName: \"kubernetes.io/projected/0c608208-9f64-4b53-b3ac-759356b9ab48-kube-api-access-p66xg\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.688956 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-dns-swift-storage-0\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.689025 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-openstack-edpm-ipam\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.791303 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-dns-svc\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.792008 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-config\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.792143 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-ovsdbserver-nb\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.792392 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-ovsdbserver-sb\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.792535 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p66xg\" (UniqueName: \"kubernetes.io/projected/0c608208-9f64-4b53-b3ac-759356b9ab48-kube-api-access-p66xg\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.792707 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-dns-swift-storage-0\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.792877 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-config\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.792884 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-openstack-edpm-ipam\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.792919 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-ovsdbserver-nb\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.793202 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-ovsdbserver-sb\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.793441 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-dns-svc\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.793835 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-dns-swift-storage-0\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.794063 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-openstack-edpm-ipam\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.826179 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p66xg\" (UniqueName: \"kubernetes.io/projected/0c608208-9f64-4b53-b3ac-759356b9ab48-kube-api-access-p66xg\") pod \"dnsmasq-dns-bbd76bc6c-qwhjr\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:48 crc kubenswrapper[4793]: I0127 20:48:48.906741 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:49 crc kubenswrapper[4793]: I0127 20:48:49.407257 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bbd76bc6c-qwhjr"]
Jan 27 20:48:50 crc kubenswrapper[4793]: I0127 20:48:50.353900 4793 generic.go:334] "Generic (PLEG): container finished" podID="0c608208-9f64-4b53-b3ac-759356b9ab48" containerID="a8dd6c1567f9cd31753984af880c59ad23c0bfa2cb10ebcc10cdd24382d52d2c" exitCode=0
Jan 27 20:48:50 crc kubenswrapper[4793]: I0127 20:48:50.353948 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr" event={"ID":"0c608208-9f64-4b53-b3ac-759356b9ab48","Type":"ContainerDied","Data":"a8dd6c1567f9cd31753984af880c59ad23c0bfa2cb10ebcc10cdd24382d52d2c"}
Jan 27 20:48:50 crc kubenswrapper[4793]: I0127 20:48:50.354215 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr" event={"ID":"0c608208-9f64-4b53-b3ac-759356b9ab48","Type":"ContainerStarted","Data":"4e7e5f3b203a650b9a9665fdd55b864f340fa7ede8a3fc7034283d0d928ed144"}
Jan 27 20:48:51 crc kubenswrapper[4793]: I0127 20:48:51.431652 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr" event={"ID":"0c608208-9f64-4b53-b3ac-759356b9ab48","Type":"ContainerStarted","Data":"66b9a7baa4363600c4b9ba70ef11bc5bb34e5554d11bcd7c95cb615712f03dbf"}
Jan 27 20:48:51 crc kubenswrapper[4793]: I0127 20:48:51.432162 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:51 crc kubenswrapper[4793]: I0127 20:48:51.458366 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr" podStartSLOduration=3.458328014 podStartE2EDuration="3.458328014s" podCreationTimestamp="2026-01-27 20:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:48:51.45016886 +0000 UTC m=+2756.840422016" watchObservedRunningTime="2026-01-27 20:48:51.458328014 +0000 UTC m=+2756.848581170"
Jan 27 20:48:54 crc kubenswrapper[4793]: I0127 20:48:54.803819 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542"
Jan 27 20:48:54 crc kubenswrapper[4793]: E0127 20:48:54.804601 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:48:58 crc kubenswrapper[4793]: I0127 20:48:58.908438 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr"
Jan 27 20:48:58 crc kubenswrapper[4793]: I0127 20:48:58.972123 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-646848474f-brx6x"]
Jan 27 20:48:58 crc kubenswrapper[4793]: I0127 20:48:58.972411 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-646848474f-brx6x" podUID="2335f058-4ecb-4a36-9a9e-7cfa8d90ac04" containerName="dnsmasq-dns" containerID="cri-o://bc4d58d02b7e4028c571fce5fc335a9ab5c2c8570fcc2481487c27389ae4f848" gracePeriod=10
"SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-667cf85d85-npcsc"] Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.335023 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.424417 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-667cf85d85-npcsc"] Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.447821 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-openstack-edpm-ipam\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.448659 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-dns-svc\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.448713 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-ovsdbserver-nb\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.449073 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-config\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.449284 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-ovsdbserver-sb\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.449405 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-dns-swift-storage-0\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.449526 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l96wc\" (UniqueName: \"kubernetes.io/projected/822c33e2-e40a-4194-8ec0-f413e4915457-kube-api-access-l96wc\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.546454 4793 generic.go:334] "Generic (PLEG): container finished" podID="2335f058-4ecb-4a36-9a9e-7cfa8d90ac04" containerID="bc4d58d02b7e4028c571fce5fc335a9ab5c2c8570fcc2481487c27389ae4f848" exitCode=0 Jan 27 
20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.546536 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-646848474f-brx6x" event={"ID":"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04","Type":"ContainerDied","Data":"bc4d58d02b7e4028c571fce5fc335a9ab5c2c8570fcc2481487c27389ae4f848"} Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.554894 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-ovsdbserver-nb\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.555453 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-config\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.555836 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-ovsdbserver-sb\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.555976 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-dns-swift-storage-0\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.556095 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l96wc\" (UniqueName: \"kubernetes.io/projected/822c33e2-e40a-4194-8ec0-f413e4915457-kube-api-access-l96wc\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.556255 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-openstack-edpm-ipam\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.556307 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-dns-svc\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.556653 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-ovsdbserver-nb\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.557868 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-config\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.557997 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-ovsdbserver-sb\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.558619 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-dns-swift-storage-0\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.558950 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-openstack-edpm-ipam\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.558973 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/822c33e2-e40a-4194-8ec0-f413e4915457-dns-svc\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.589684 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l96wc\" (UniqueName: \"kubernetes.io/projected/822c33e2-e40a-4194-8ec0-f413e4915457-kube-api-access-l96wc\") pod \"dnsmasq-dns-667cf85d85-npcsc\" (UID: \"822c33e2-e40a-4194-8ec0-f413e4915457\") " pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.663187 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.800176 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.966920 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-ovsdbserver-sb\") pod \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.966983 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-ovsdbserver-nb\") pod \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.967110 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-dns-svc\") pod \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.967137 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-config\") pod \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.967185 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-dns-swift-storage-0\") pod \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.967301 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvvr2\" (UniqueName: \"kubernetes.io/projected/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-kube-api-access-kvvr2\") pod \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\" (UID: \"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04\") " Jan 27 20:48:59 crc kubenswrapper[4793]: I0127 20:48:59.988188 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-kube-api-access-kvvr2" (OuterVolumeSpecName: "kube-api-access-kvvr2") pod "2335f058-4ecb-4a36-9a9e-7cfa8d90ac04" (UID: "2335f058-4ecb-4a36-9a9e-7cfa8d90ac04"). InnerVolumeSpecName "kube-api-access-kvvr2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.049743 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2335f058-4ecb-4a36-9a9e-7cfa8d90ac04" (UID: "2335f058-4ecb-4a36-9a9e-7cfa8d90ac04"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.057788 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2335f058-4ecb-4a36-9a9e-7cfa8d90ac04" (UID: "2335f058-4ecb-4a36-9a9e-7cfa8d90ac04"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.071660 4793 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.071698 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kvvr2\" (UniqueName: \"kubernetes.io/projected/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-kube-api-access-kvvr2\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.071709 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.073088 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-config" (OuterVolumeSpecName: "config") pod "2335f058-4ecb-4a36-9a9e-7cfa8d90ac04" (UID: "2335f058-4ecb-4a36-9a9e-7cfa8d90ac04"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.082934 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2335f058-4ecb-4a36-9a9e-7cfa8d90ac04" (UID: "2335f058-4ecb-4a36-9a9e-7cfa8d90ac04"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.095214 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2335f058-4ecb-4a36-9a9e-7cfa8d90ac04" (UID: "2335f058-4ecb-4a36-9a9e-7cfa8d90ac04"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.174061 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.174107 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.174121 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.268490 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-667cf85d85-npcsc"] Jan 27 20:49:00 crc kubenswrapper[4793]: W0127 20:49:00.290089 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod822c33e2_e40a_4194_8ec0_f413e4915457.slice/crio-5bb7ada8c5c59f5ea59e1c42225e3e54774a9083f639851cee3841c92aa13985 WatchSource:0}: Error finding container 5bb7ada8c5c59f5ea59e1c42225e3e54774a9083f639851cee3841c92aa13985: Status 404 returned error can't find the container with id 5bb7ada8c5c59f5ea59e1c42225e3e54774a9083f639851cee3841c92aa13985 Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.558427 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-667cf85d85-npcsc" event={"ID":"822c33e2-e40a-4194-8ec0-f413e4915457","Type":"ContainerStarted","Data":"5bb7ada8c5c59f5ea59e1c42225e3e54774a9083f639851cee3841c92aa13985"} Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.561258 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-646848474f-brx6x" event={"ID":"2335f058-4ecb-4a36-9a9e-7cfa8d90ac04","Type":"ContainerDied","Data":"d3a4a232b108a75c5f61e26466c0bee0ee67aebd372083d26381d4998286e7f8"} Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.561300 4793 scope.go:117] "RemoveContainer" containerID="bc4d58d02b7e4028c571fce5fc335a9ab5c2c8570fcc2481487c27389ae4f848" Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.561342 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-646848474f-brx6x" Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.591171 4793 scope.go:117] "RemoveContainer" containerID="9574fc337eb21abe8fdaaf47ce3bee1cce78d013821415913dec93539f1d6231" Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.607379 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-646848474f-brx6x"] Jan 27 20:49:00 crc kubenswrapper[4793]: I0127 20:49:00.617898 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-646848474f-brx6x"] Jan 27 20:49:01 crc kubenswrapper[4793]: I0127 20:49:01.570267 4793 generic.go:334] "Generic (PLEG): container finished" podID="822c33e2-e40a-4194-8ec0-f413e4915457" containerID="547fa658414de53f1cbb2d1ea341446862ed0bb1121e67a1e40a1e1fc9c48541" exitCode=0 Jan 27 20:49:01 crc kubenswrapper[4793]: I0127 20:49:01.570335 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-667cf85d85-npcsc" event={"ID":"822c33e2-e40a-4194-8ec0-f413e4915457","Type":"ContainerDied","Data":"547fa658414de53f1cbb2d1ea341446862ed0bb1121e67a1e40a1e1fc9c48541"} Jan 27 20:49:01 crc kubenswrapper[4793]: I0127 20:49:01.814351 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2335f058-4ecb-4a36-9a9e-7cfa8d90ac04" path="/var/lib/kubelet/pods/2335f058-4ecb-4a36-9a9e-7cfa8d90ac04/volumes" Jan 27 20:49:02 crc kubenswrapper[4793]: I0127 20:49:02.583505 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-667cf85d85-npcsc" event={"ID":"822c33e2-e40a-4194-8ec0-f413e4915457","Type":"ContainerStarted","Data":"33199fdbe494ae541177c18bc732317f34462f208da7bb07868ddb75086584fa"} Jan 27 20:49:02 crc kubenswrapper[4793]: I0127 20:49:02.583683 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:49:02 crc kubenswrapper[4793]: I0127 20:49:02.615599 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-667cf85d85-npcsc" podStartSLOduration=3.61557604 podStartE2EDuration="3.61557604s" podCreationTimestamp="2026-01-27 20:48:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:49:02.612028071 +0000 UTC m=+2768.002281227" watchObservedRunningTime="2026-01-27 20:49:02.61557604 +0000 UTC m=+2768.005829196" Jan 27 20:49:08 crc kubenswrapper[4793]: I0127 20:49:08.804220 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:49:08 crc kubenswrapper[4793]: E0127 20:49:08.805148 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:49:09 crc kubenswrapper[4793]: I0127 20:49:09.665381 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-667cf85d85-npcsc" Jan 27 20:49:09 crc kubenswrapper[4793]: I0127 20:49:09.747696 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbd76bc6c-qwhjr"] Jan 27 20:49:09 crc kubenswrapper[4793]: I0127 20:49:09.747981 4793 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr" podUID="0c608208-9f64-4b53-b3ac-759356b9ab48" containerName="dnsmasq-dns" containerID="cri-o://66b9a7baa4363600c4b9ba70ef11bc5bb34e5554d11bcd7c95cb615712f03dbf" gracePeriod=10 Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.525799 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.611499 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-openstack-edpm-ipam\") pod \"0c608208-9f64-4b53-b3ac-759356b9ab48\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.611583 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-dns-swift-storage-0\") pod \"0c608208-9f64-4b53-b3ac-759356b9ab48\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.611615 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p66xg\" (UniqueName: \"kubernetes.io/projected/0c608208-9f64-4b53-b3ac-759356b9ab48-kube-api-access-p66xg\") pod \"0c608208-9f64-4b53-b3ac-759356b9ab48\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.611719 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-dns-svc\") pod \"0c608208-9f64-4b53-b3ac-759356b9ab48\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.611778 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-ovsdbserver-nb\") pod \"0c608208-9f64-4b53-b3ac-759356b9ab48\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.611833 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-ovsdbserver-sb\") pod \"0c608208-9f64-4b53-b3ac-759356b9ab48\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.611877 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-config\") pod \"0c608208-9f64-4b53-b3ac-759356b9ab48\" (UID: \"0c608208-9f64-4b53-b3ac-759356b9ab48\") " Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.639813 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c608208-9f64-4b53-b3ac-759356b9ab48-kube-api-access-p66xg" (OuterVolumeSpecName: "kube-api-access-p66xg") pod "0c608208-9f64-4b53-b3ac-759356b9ab48" (UID: "0c608208-9f64-4b53-b3ac-759356b9ab48"). InnerVolumeSpecName "kube-api-access-p66xg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.656846 4793 generic.go:334] "Generic (PLEG): container finished" podID="0c608208-9f64-4b53-b3ac-759356b9ab48" containerID="66b9a7baa4363600c4b9ba70ef11bc5bb34e5554d11bcd7c95cb615712f03dbf" exitCode=0 Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.656893 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr" event={"ID":"0c608208-9f64-4b53-b3ac-759356b9ab48","Type":"ContainerDied","Data":"66b9a7baa4363600c4b9ba70ef11bc5bb34e5554d11bcd7c95cb615712f03dbf"} Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.656920 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr" event={"ID":"0c608208-9f64-4b53-b3ac-759356b9ab48","Type":"ContainerDied","Data":"4e7e5f3b203a650b9a9665fdd55b864f340fa7ede8a3fc7034283d0d928ed144"} Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.656932 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bbd76bc6c-qwhjr" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.656937 4793 scope.go:117] "RemoveContainer" containerID="66b9a7baa4363600c4b9ba70ef11bc5bb34e5554d11bcd7c95cb615712f03dbf" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.670496 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0c608208-9f64-4b53-b3ac-759356b9ab48" (UID: "0c608208-9f64-4b53-b3ac-759356b9ab48"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.675359 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "0c608208-9f64-4b53-b3ac-759356b9ab48" (UID: "0c608208-9f64-4b53-b3ac-759356b9ab48"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.683495 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0c608208-9f64-4b53-b3ac-759356b9ab48" (UID: "0c608208-9f64-4b53-b3ac-759356b9ab48"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.685204 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0c608208-9f64-4b53-b3ac-759356b9ab48" (UID: "0c608208-9f64-4b53-b3ac-759356b9ab48"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.687210 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-config" (OuterVolumeSpecName: "config") pod "0c608208-9f64-4b53-b3ac-759356b9ab48" (UID: "0c608208-9f64-4b53-b3ac-759356b9ab48"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.708874 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0c608208-9f64-4b53-b3ac-759356b9ab48" (UID: "0c608208-9f64-4b53-b3ac-759356b9ab48"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.714913 4793 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.714955 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.714968 4793 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.714979 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-config\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.714990 4793 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.715002 4793 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0c608208-9f64-4b53-b3ac-759356b9ab48-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.715013 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p66xg\" (UniqueName: \"kubernetes.io/projected/0c608208-9f64-4b53-b3ac-759356b9ab48-kube-api-access-p66xg\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.778875 4793 scope.go:117] "RemoveContainer" containerID="a8dd6c1567f9cd31753984af880c59ad23c0bfa2cb10ebcc10cdd24382d52d2c" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.799523 4793 scope.go:117] "RemoveContainer" containerID="66b9a7baa4363600c4b9ba70ef11bc5bb34e5554d11bcd7c95cb615712f03dbf" Jan 27 20:49:10 crc kubenswrapper[4793]: E0127 20:49:10.800274 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66b9a7baa4363600c4b9ba70ef11bc5bb34e5554d11bcd7c95cb615712f03dbf\": container with ID starting with 66b9a7baa4363600c4b9ba70ef11bc5bb34e5554d11bcd7c95cb615712f03dbf not found: ID does not exist" containerID="66b9a7baa4363600c4b9ba70ef11bc5bb34e5554d11bcd7c95cb615712f03dbf" Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.800314 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66b9a7baa4363600c4b9ba70ef11bc5bb34e5554d11bcd7c95cb615712f03dbf"} err="failed to get container status \"66b9a7baa4363600c4b9ba70ef11bc5bb34e5554d11bcd7c95cb615712f03dbf\": rpc error: code = NotFound desc = 
Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.800339 4793 scope.go:117] "RemoveContainer" containerID="a8dd6c1567f9cd31753984af880c59ad23c0bfa2cb10ebcc10cdd24382d52d2c"
Jan 27 20:49:10 crc kubenswrapper[4793]: E0127 20:49:10.800917 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8dd6c1567f9cd31753984af880c59ad23c0bfa2cb10ebcc10cdd24382d52d2c\": container with ID starting with a8dd6c1567f9cd31753984af880c59ad23c0bfa2cb10ebcc10cdd24382d52d2c not found: ID does not exist" containerID="a8dd6c1567f9cd31753984af880c59ad23c0bfa2cb10ebcc10cdd24382d52d2c"
Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.800944 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8dd6c1567f9cd31753984af880c59ad23c0bfa2cb10ebcc10cdd24382d52d2c"} err="failed to get container status \"a8dd6c1567f9cd31753984af880c59ad23c0bfa2cb10ebcc10cdd24382d52d2c\": rpc error: code = NotFound desc = could not find container \"a8dd6c1567f9cd31753984af880c59ad23c0bfa2cb10ebcc10cdd24382d52d2c\": container with ID starting with a8dd6c1567f9cd31753984af880c59ad23c0bfa2cb10ebcc10cdd24382d52d2c not found: ID does not exist"
Jan 27 20:49:10 crc kubenswrapper[4793]: I0127 20:49:10.992293 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbd76bc6c-qwhjr"]
Jan 27 20:49:11 crc kubenswrapper[4793]: I0127 20:49:11.003321 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bbd76bc6c-qwhjr"]
Jan 27 20:49:11 crc kubenswrapper[4793]: E0127 20:49:11.187121 4793 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0c608208_9f64_4b53_b3ac_759356b9ab48.slice/crio-4e7e5f3b203a650b9a9665fdd55b864f340fa7ede8a3fc7034283d0d928ed144\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0c608208_9f64_4b53_b3ac_759356b9ab48.slice\": RecentStats: unable to find data in memory cache]"
Jan 27 20:49:11 crc kubenswrapper[4793]: I0127 20:49:11.847591 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c608208-9f64-4b53-b3ac-759356b9ab48" path="/var/lib/kubelet/pods/0c608208-9f64-4b53-b3ac-759356b9ab48/volumes"
Jan 27 20:49:12 crc kubenswrapper[4793]: I0127 20:49:12.679900 4793 generic.go:334] "Generic (PLEG): container finished" podID="6b5f0924-d10f-4e93-963c-de03d16f48c1" containerID="f7ba1d9bd678a3fc2a79c89659023decbfd5f7a5a918d7e56fbc37fbcc02d959" exitCode=0
Jan 27 20:49:12 crc kubenswrapper[4793]: I0127 20:49:12.680023 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6b5f0924-d10f-4e93-963c-de03d16f48c1","Type":"ContainerDied","Data":"f7ba1d9bd678a3fc2a79c89659023decbfd5f7a5a918d7e56fbc37fbcc02d959"}
Jan 27 20:49:12 crc kubenswrapper[4793]: I0127 20:49:12.682589 4793 generic.go:334] "Generic (PLEG): container finished" podID="c4797dd0-4754-4037-983f-64d2aa1fa902" containerID="5abdb1fedc87fe9592e0de28d8ac40c184473462cd56915e72a426335386d273" exitCode=0
Jan 27 20:49:12 crc kubenswrapper[4793]: I0127 20:49:12.682666 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c4797dd0-4754-4037-983f-64d2aa1fa902","Type":"ContainerDied","Data":"5abdb1fedc87fe9592e0de28d8ac40c184473462cd56915e72a426335386d273"}
pod="openstack/rabbitmq-server-0" event={"ID":"c4797dd0-4754-4037-983f-64d2aa1fa902","Type":"ContainerDied","Data":"5abdb1fedc87fe9592e0de28d8ac40c184473462cd56915e72a426335386d273"} Jan 27 20:49:13 crc kubenswrapper[4793]: I0127 20:49:13.720350 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6b5f0924-d10f-4e93-963c-de03d16f48c1","Type":"ContainerStarted","Data":"c60806c8090ca8b8b4f254e81f8ad457fbe838867fc4ede5c2e76469c1f23d61"} Jan 27 20:49:13 crc kubenswrapper[4793]: I0127 20:49:13.720887 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 27 20:49:13 crc kubenswrapper[4793]: I0127 20:49:13.722738 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c4797dd0-4754-4037-983f-64d2aa1fa902","Type":"ContainerStarted","Data":"7358e423061c1e7e2b81b1b26800f9d28ca6e2a97e889e092b727dd39f087e40"} Jan 27 20:49:13 crc kubenswrapper[4793]: I0127 20:49:13.722868 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 27 20:49:13 crc kubenswrapper[4793]: I0127 20:49:13.748111 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.748089687 podStartE2EDuration="36.748089687s" podCreationTimestamp="2026-01-27 20:48:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:49:13.746024205 +0000 UTC m=+2779.136277371" watchObservedRunningTime="2026-01-27 20:49:13.748089687 +0000 UTC m=+2779.138342863" Jan 27 20:49:13 crc kubenswrapper[4793]: I0127 20:49:13.781301 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.781272048 podStartE2EDuration="36.781272048s" podCreationTimestamp="2026-01-27 20:48:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 20:49:13.771846191 +0000 UTC m=+2779.162099357" watchObservedRunningTime="2026-01-27 20:49:13.781272048 +0000 UTC m=+2779.171525234" Jan 27 20:49:20 crc kubenswrapper[4793]: I0127 20:49:20.804029 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:49:20 crc kubenswrapper[4793]: E0127 20:49:20.804869 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:49:23 crc kubenswrapper[4793]: I0127 20:49:23.884415 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb"] Jan 27 20:49:23 crc kubenswrapper[4793]: E0127 20:49:23.885461 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2335f058-4ecb-4a36-9a9e-7cfa8d90ac04" containerName="dnsmasq-dns" Jan 27 20:49:23 crc kubenswrapper[4793]: I0127 20:49:23.885480 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2335f058-4ecb-4a36-9a9e-7cfa8d90ac04" containerName="dnsmasq-dns" Jan 27 20:49:23 crc kubenswrapper[4793]: E0127 20:49:23.885511 4793 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="0c608208-9f64-4b53-b3ac-759356b9ab48" containerName="dnsmasq-dns" Jan 27 20:49:23 crc kubenswrapper[4793]: I0127 20:49:23.885518 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c608208-9f64-4b53-b3ac-759356b9ab48" containerName="dnsmasq-dns" Jan 27 20:49:23 crc kubenswrapper[4793]: E0127 20:49:23.885528 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c608208-9f64-4b53-b3ac-759356b9ab48" containerName="init" Jan 27 20:49:23 crc kubenswrapper[4793]: I0127 20:49:23.885534 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c608208-9f64-4b53-b3ac-759356b9ab48" containerName="init" Jan 27 20:49:23 crc kubenswrapper[4793]: E0127 20:49:23.885573 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2335f058-4ecb-4a36-9a9e-7cfa8d90ac04" containerName="init" Jan 27 20:49:23 crc kubenswrapper[4793]: I0127 20:49:23.885582 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2335f058-4ecb-4a36-9a9e-7cfa8d90ac04" containerName="init" Jan 27 20:49:23 crc kubenswrapper[4793]: I0127 20:49:23.885813 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c608208-9f64-4b53-b3ac-759356b9ab48" containerName="dnsmasq-dns" Jan 27 20:49:23 crc kubenswrapper[4793]: I0127 20:49:23.885830 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="2335f058-4ecb-4a36-9a9e-7cfa8d90ac04" containerName="dnsmasq-dns" Jan 27 20:49:23 crc kubenswrapper[4793]: I0127 20:49:23.886740 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb" Jan 27 20:49:23 crc kubenswrapper[4793]: I0127 20:49:23.892650 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 20:49:23 crc kubenswrapper[4793]: I0127 20:49:23.897624 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb"] Jan 27 20:49:23 crc kubenswrapper[4793]: I0127 20:49:23.897884 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 20:49:23 crc kubenswrapper[4793]: I0127 20:49:23.898156 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd" Jan 27 20:49:23 crc kubenswrapper[4793]: I0127 20:49:23.907662 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 20:49:24 crc kubenswrapper[4793]: I0127 20:49:24.038587 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb\" (UID: \"597edc01-51d3-4199-ae65-a0439d6bbf66\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb" Jan 27 20:49:24 crc kubenswrapper[4793]: I0127 20:49:24.038663 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkct6\" (UniqueName: \"kubernetes.io/projected/597edc01-51d3-4199-ae65-a0439d6bbf66-kube-api-access-gkct6\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb\" (UID: \"597edc01-51d3-4199-ae65-a0439d6bbf66\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb" Jan 27 20:49:24 crc kubenswrapper[4793]: I0127 20:49:24.038689 4793 
Jan 27 20:49:24 crc kubenswrapper[4793]: I0127 20:49:24.038997 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb\" (UID: \"597edc01-51d3-4199-ae65-a0439d6bbf66\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb"
Jan 27 20:49:24 crc kubenswrapper[4793]: I0127 20:49:24.141352 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb\" (UID: \"597edc01-51d3-4199-ae65-a0439d6bbf66\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb"
Jan 27 20:49:24 crc kubenswrapper[4793]: I0127 20:49:24.141452 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkct6\" (UniqueName: \"kubernetes.io/projected/597edc01-51d3-4199-ae65-a0439d6bbf66-kube-api-access-gkct6\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb\" (UID: \"597edc01-51d3-4199-ae65-a0439d6bbf66\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb"
Jan 27 20:49:24 crc kubenswrapper[4793]: I0127 20:49:24.141497 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb\" (UID: \"597edc01-51d3-4199-ae65-a0439d6bbf66\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb"
Jan 27 20:49:24 crc kubenswrapper[4793]: I0127 20:49:24.141606 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb\" (UID: \"597edc01-51d3-4199-ae65-a0439d6bbf66\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb"
Jan 27 20:49:24 crc kubenswrapper[4793]: I0127 20:49:24.149153 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb\" (UID: \"597edc01-51d3-4199-ae65-a0439d6bbf66\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb"
Jan 27 20:49:24 crc kubenswrapper[4793]: I0127 20:49:24.149644 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb\" (UID: \"597edc01-51d3-4199-ae65-a0439d6bbf66\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb"
Jan 27 20:49:24 crc kubenswrapper[4793]: I0127 20:49:24.149733 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb\" (UID: \"597edc01-51d3-4199-ae65-a0439d6bbf66\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb"
Jan 27 20:49:24 crc kubenswrapper[4793]: I0127 20:49:24.164434 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkct6\" (UniqueName: \"kubernetes.io/projected/597edc01-51d3-4199-ae65-a0439d6bbf66-kube-api-access-gkct6\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb\" (UID: \"597edc01-51d3-4199-ae65-a0439d6bbf66\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb"
Jan 27 20:49:24 crc kubenswrapper[4793]: I0127 20:49:24.217971 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb"
Jan 27 20:49:24 crc kubenswrapper[4793]: I0127 20:49:24.944757 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb"]
Jan 27 20:49:24 crc kubenswrapper[4793]: W0127 20:49:24.955867 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod597edc01_51d3_4199_ae65_a0439d6bbf66.slice/crio-9887a70ff67566a8cbac39bfcbac29f629d168d50ca129a221b4877cff71a6d4 WatchSource:0}: Error finding container 9887a70ff67566a8cbac39bfcbac29f629d168d50ca129a221b4877cff71a6d4: Status 404 returned error can't find the container with id 9887a70ff67566a8cbac39bfcbac29f629d168d50ca129a221b4877cff71a6d4
Jan 27 20:49:24 crc kubenswrapper[4793]: I0127 20:49:24.959543 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 27 20:49:25 crc kubenswrapper[4793]: I0127 20:49:25.839916 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb" event={"ID":"597edc01-51d3-4199-ae65-a0439d6bbf66","Type":"ContainerStarted","Data":"9887a70ff67566a8cbac39bfcbac29f629d168d50ca129a221b4877cff71a6d4"}
Jan 27 20:49:27 crc kubenswrapper[4793]: I0127 20:49:27.590348 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="6b5f0924-d10f-4e93-963c-de03d16f48c1" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.234:5671: connect: connection refused"
Jan 27 20:49:27 crc kubenswrapper[4793]: I0127 20:49:27.659807 4793 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="c4797dd0-4754-4037-983f-64d2aa1fa902" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.235:5671: connect: connection refused"
Jan 27 20:49:32 crc kubenswrapper[4793]: I0127 20:49:32.928398 4793 scope.go:117] "RemoveContainer" containerID="1769161e8634efcc339439e61f08418e2f508bc1000ed8a23a5128fa680a685d"
Jan 27 20:49:35 crc kubenswrapper[4793]: I0127 20:49:35.231144 4793 scope.go:117] "RemoveContainer" containerID="642c4da046cb49592c2dbdb5ea15fec53c4d366602f78d9dd9a9adc6392c53a3"
Jan 27 20:49:35 crc kubenswrapper[4793]: I0127 20:49:35.818411 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542"
Jan 27 20:49:35 crc kubenswrapper[4793]: E0127 20:49:35.819164 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
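[editor's note] The "Probe failed" records above are tcpSocket readiness checks against the rabbitmq AMQP-over-TLS port: the kubelet dials the pod IP and port, and a refused connection marks the probe as failed until the dial succeeds (the "status=ready" flips appear further below at 20:49:37). A hedged approximation of such a check:

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    // tcpProbe approximates a tcpSocket readiness check: success means
    // the endpoint accepts a TCP connection within the timeout.
    func tcpProbe(addr string, timeout time.Duration) error {
        conn, err := net.DialTimeout("tcp", addr, timeout)
        if err != nil {
            return err // e.g. "connect: connection refused" as in the log
        }
        return conn.Close()
    }

    func main() {
        // 10.217.0.234:5671 is the rabbitmq-cell1 endpoint probed above.
        if err := tcpProbe("10.217.0.234:5671", time.Second); err != nil {
            fmt.Println("probe failed:", err)
        } else {
            fmt.Println("probe succeeded")
        }
    }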
Jan 27 20:49:35 crc kubenswrapper[4793]: I0127 20:49:35.926321 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb" event={"ID":"597edc01-51d3-4199-ae65-a0439d6bbf66","Type":"ContainerStarted","Data":"f5aa3cd719ad60806ec65f4c5c5cb67a2203e8e8c32dfbd563db326375becf75"}
Jan 27 20:49:35 crc kubenswrapper[4793]: I0127 20:49:35.960213 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb" podStartSLOduration=2.612529363 podStartE2EDuration="12.960188486s" podCreationTimestamp="2026-01-27 20:49:23 +0000 UTC" firstStartedPulling="2026-01-27 20:49:24.959214752 +0000 UTC m=+2790.349467908" lastFinishedPulling="2026-01-27 20:49:35.306873875 +0000 UTC m=+2800.697127031" observedRunningTime="2026-01-27 20:49:35.944271239 +0000 UTC m=+2801.334524435" watchObservedRunningTime="2026-01-27 20:49:35.960188486 +0000 UTC m=+2801.350441652"
Jan 27 20:49:37 crc kubenswrapper[4793]: I0127 20:49:37.591858 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Jan 27 20:49:37 crc kubenswrapper[4793]: I0127 20:49:37.662228 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Jan 27 20:49:47 crc kubenswrapper[4793]: I0127 20:49:47.803811 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542"
Jan 27 20:49:47 crc kubenswrapper[4793]: E0127 20:49:47.804845 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:49:48 crc kubenswrapper[4793]: I0127 20:49:48.041191 4793 generic.go:334] "Generic (PLEG): container finished" podID="597edc01-51d3-4199-ae65-a0439d6bbf66" containerID="f5aa3cd719ad60806ec65f4c5c5cb67a2203e8e8c32dfbd563db326375becf75" exitCode=0
Jan 27 20:49:48 crc kubenswrapper[4793]: I0127 20:49:48.041288 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb" event={"ID":"597edc01-51d3-4199-ae65-a0439d6bbf66","Type":"ContainerDied","Data":"f5aa3cd719ad60806ec65f4c5c5cb67a2203e8e8c32dfbd563db326375becf75"}
Jan 27 20:49:49 crc kubenswrapper[4793]: I0127 20:49:49.619557 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb"
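[editor's note] The pod_startup_latency_tracker record above is internally consistent and shows the two duration fields differ exactly by the image-pull window: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (20:49:35.960188486 − 20:49:23 = 12.960188486s), and podStartSLOduration subtracts lastFinishedPulling − firstStartedPulling (10.347659123s), leaving 2.612529363s. A small Go check of that arithmetic using the timestamps from the record:

    package main

    import (
        "fmt"
        "time"
    )

    func mustParse(s string) time.Time {
        // Layout matches Go's default time.String() form used in the log.
        t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
        if err != nil {
            panic(err)
        }
        return t
    }

    func main() {
        created := mustParse("2026-01-27 20:49:23 +0000 UTC")
        firstPull := mustParse("2026-01-27 20:49:24.959214752 +0000 UTC")
        lastPull := mustParse("2026-01-27 20:49:35.306873875 +0000 UTC")
        watched := mustParse("2026-01-27 20:49:35.960188486 +0000 UTC")

        e2e := watched.Sub(created)           // 12.960188486s = podStartE2EDuration
        slo := e2e - lastPull.Sub(firstPull)  // 2.612529363s  = podStartSLOduration
        fmt.Println(e2e, slo)
    }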
Jan 27 20:49:49 crc kubenswrapper[4793]: I0127 20:49:49.781566 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-inventory\") pod \"597edc01-51d3-4199-ae65-a0439d6bbf66\" (UID: \"597edc01-51d3-4199-ae65-a0439d6bbf66\") "
Jan 27 20:49:49 crc kubenswrapper[4793]: I0127 20:49:49.781994 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkct6\" (UniqueName: \"kubernetes.io/projected/597edc01-51d3-4199-ae65-a0439d6bbf66-kube-api-access-gkct6\") pod \"597edc01-51d3-4199-ae65-a0439d6bbf66\" (UID: \"597edc01-51d3-4199-ae65-a0439d6bbf66\") "
Jan 27 20:49:49 crc kubenswrapper[4793]: I0127 20:49:49.782167 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-repo-setup-combined-ca-bundle\") pod \"597edc01-51d3-4199-ae65-a0439d6bbf66\" (UID: \"597edc01-51d3-4199-ae65-a0439d6bbf66\") "
Jan 27 20:49:49 crc kubenswrapper[4793]: I0127 20:49:49.782216 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-ssh-key-openstack-edpm-ipam\") pod \"597edc01-51d3-4199-ae65-a0439d6bbf66\" (UID: \"597edc01-51d3-4199-ae65-a0439d6bbf66\") "
Jan 27 20:49:49 crc kubenswrapper[4793]: I0127 20:49:49.789257 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "597edc01-51d3-4199-ae65-a0439d6bbf66" (UID: "597edc01-51d3-4199-ae65-a0439d6bbf66"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:49:49 crc kubenswrapper[4793]: I0127 20:49:49.790299 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/597edc01-51d3-4199-ae65-a0439d6bbf66-kube-api-access-gkct6" (OuterVolumeSpecName: "kube-api-access-gkct6") pod "597edc01-51d3-4199-ae65-a0439d6bbf66" (UID: "597edc01-51d3-4199-ae65-a0439d6bbf66"). InnerVolumeSpecName "kube-api-access-gkct6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:49:49 crc kubenswrapper[4793]: I0127 20:49:49.866014 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "597edc01-51d3-4199-ae65-a0439d6bbf66" (UID: "597edc01-51d3-4199-ae65-a0439d6bbf66"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:49:49 crc kubenswrapper[4793]: I0127 20:49:49.866125 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-inventory" (OuterVolumeSpecName: "inventory") pod "597edc01-51d3-4199-ae65-a0439d6bbf66" (UID: "597edc01-51d3-4199-ae65-a0439d6bbf66"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:49:49 crc kubenswrapper[4793]: I0127 20:49:49.885152 4793 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:49 crc kubenswrapper[4793]: I0127 20:49:49.885198 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:49 crc kubenswrapper[4793]: I0127 20:49:49.885216 4793 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/597edc01-51d3-4199-ae65-a0439d6bbf66-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:49 crc kubenswrapper[4793]: I0127 20:49:49.885228 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gkct6\" (UniqueName: \"kubernetes.io/projected/597edc01-51d3-4199-ae65-a0439d6bbf66-kube-api-access-gkct6\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.059110 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb" event={"ID":"597edc01-51d3-4199-ae65-a0439d6bbf66","Type":"ContainerDied","Data":"9887a70ff67566a8cbac39bfcbac29f629d168d50ca129a221b4877cff71a6d4"} Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.059152 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9887a70ff67566a8cbac39bfcbac29f629d168d50ca129a221b4877cff71a6d4" Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.059154 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb" Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.156757 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5"] Jan 27 20:49:50 crc kubenswrapper[4793]: E0127 20:49:50.157356 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="597edc01-51d3-4199-ae65-a0439d6bbf66" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.157376 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="597edc01-51d3-4199-ae65-a0439d6bbf66" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.157606 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="597edc01-51d3-4199-ae65-a0439d6bbf66" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.158508 4793 util.go:30] "No sandbox for pod can be found. 
Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.161763 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd"
Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.162098 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.163858 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.164026 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.185738 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5"]
Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.292674 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4w95\" (UniqueName: \"kubernetes.io/projected/41c7f13a-589b-496e-9709-e5270fb0e6aa-kube-api-access-m4w95\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-cfnm5\" (UID: \"41c7f13a-589b-496e-9709-e5270fb0e6aa\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5"
Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.292897 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/41c7f13a-589b-496e-9709-e5270fb0e6aa-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-cfnm5\" (UID: \"41c7f13a-589b-496e-9709-e5270fb0e6aa\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5"
Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.293008 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/41c7f13a-589b-496e-9709-e5270fb0e6aa-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-cfnm5\" (UID: \"41c7f13a-589b-496e-9709-e5270fb0e6aa\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5"
Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.395264 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4w95\" (UniqueName: \"kubernetes.io/projected/41c7f13a-589b-496e-9709-e5270fb0e6aa-kube-api-access-m4w95\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-cfnm5\" (UID: \"41c7f13a-589b-496e-9709-e5270fb0e6aa\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5"
Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.396039 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/41c7f13a-589b-496e-9709-e5270fb0e6aa-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-cfnm5\" (UID: \"41c7f13a-589b-496e-9709-e5270fb0e6aa\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5"
Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.396114 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/41c7f13a-589b-496e-9709-e5270fb0e6aa-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-cfnm5\" (UID: \"41c7f13a-589b-496e-9709-e5270fb0e6aa\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5"
\"redhat-edpm-deployment-openstack-edpm-ipam-cfnm5\" (UID: \"41c7f13a-589b-496e-9709-e5270fb0e6aa\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5" Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.403361 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/41c7f13a-589b-496e-9709-e5270fb0e6aa-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-cfnm5\" (UID: \"41c7f13a-589b-496e-9709-e5270fb0e6aa\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5" Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.403489 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/41c7f13a-589b-496e-9709-e5270fb0e6aa-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-cfnm5\" (UID: \"41c7f13a-589b-496e-9709-e5270fb0e6aa\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5" Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.415311 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4w95\" (UniqueName: \"kubernetes.io/projected/41c7f13a-589b-496e-9709-e5270fb0e6aa-kube-api-access-m4w95\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-cfnm5\" (UID: \"41c7f13a-589b-496e-9709-e5270fb0e6aa\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5" Jan 27 20:49:50 crc kubenswrapper[4793]: I0127 20:49:50.484337 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5" Jan 27 20:49:51 crc kubenswrapper[4793]: I0127 20:49:51.023496 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5"] Jan 27 20:49:51 crc kubenswrapper[4793]: I0127 20:49:51.069739 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5" event={"ID":"41c7f13a-589b-496e-9709-e5270fb0e6aa","Type":"ContainerStarted","Data":"d26cd1a9a3e1a4bf54f58a70adf5281561c74da09f22ec5a2096355a69dedbbf"} Jan 27 20:49:52 crc kubenswrapper[4793]: I0127 20:49:52.079473 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5" event={"ID":"41c7f13a-589b-496e-9709-e5270fb0e6aa","Type":"ContainerStarted","Data":"d2393ff61e7d943ed61505b3cf00c53734a3aedbd0ccb298f2c33721d0b1398c"} Jan 27 20:49:52 crc kubenswrapper[4793]: I0127 20:49:52.097357 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5" podStartSLOduration=1.64943153 podStartE2EDuration="2.09733332s" podCreationTimestamp="2026-01-27 20:49:50 +0000 UTC" firstStartedPulling="2026-01-27 20:49:51.031148886 +0000 UTC m=+2816.421402042" lastFinishedPulling="2026-01-27 20:49:51.479050676 +0000 UTC m=+2816.869303832" observedRunningTime="2026-01-27 20:49:52.095018233 +0000 UTC m=+2817.485271389" watchObservedRunningTime="2026-01-27 20:49:52.09733332 +0000 UTC m=+2817.487586486" Jan 27 20:49:55 crc kubenswrapper[4793]: I0127 20:49:55.108097 4793 generic.go:334] "Generic (PLEG): container finished" podID="41c7f13a-589b-496e-9709-e5270fb0e6aa" containerID="d2393ff61e7d943ed61505b3cf00c53734a3aedbd0ccb298f2c33721d0b1398c" exitCode=0 Jan 27 20:49:55 crc kubenswrapper[4793]: I0127 20:49:55.108190 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5" event={"ID":"41c7f13a-589b-496e-9709-e5270fb0e6aa","Type":"ContainerDied","Data":"d2393ff61e7d943ed61505b3cf00c53734a3aedbd0ccb298f2c33721d0b1398c"} Jan 27 20:49:56 crc kubenswrapper[4793]: I0127 20:49:56.571775 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5" Jan 27 20:49:56 crc kubenswrapper[4793]: I0127 20:49:56.761954 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/41c7f13a-589b-496e-9709-e5270fb0e6aa-inventory\") pod \"41c7f13a-589b-496e-9709-e5270fb0e6aa\" (UID: \"41c7f13a-589b-496e-9709-e5270fb0e6aa\") " Jan 27 20:49:56 crc kubenswrapper[4793]: I0127 20:49:56.762537 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/41c7f13a-589b-496e-9709-e5270fb0e6aa-ssh-key-openstack-edpm-ipam\") pod \"41c7f13a-589b-496e-9709-e5270fb0e6aa\" (UID: \"41c7f13a-589b-496e-9709-e5270fb0e6aa\") " Jan 27 20:49:56 crc kubenswrapper[4793]: I0127 20:49:56.763118 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4w95\" (UniqueName: \"kubernetes.io/projected/41c7f13a-589b-496e-9709-e5270fb0e6aa-kube-api-access-m4w95\") pod \"41c7f13a-589b-496e-9709-e5270fb0e6aa\" (UID: \"41c7f13a-589b-496e-9709-e5270fb0e6aa\") " Jan 27 20:49:56 crc kubenswrapper[4793]: I0127 20:49:56.768895 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41c7f13a-589b-496e-9709-e5270fb0e6aa-kube-api-access-m4w95" (OuterVolumeSpecName: "kube-api-access-m4w95") pod "41c7f13a-589b-496e-9709-e5270fb0e6aa" (UID: "41c7f13a-589b-496e-9709-e5270fb0e6aa"). InnerVolumeSpecName "kube-api-access-m4w95". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:49:56 crc kubenswrapper[4793]: I0127 20:49:56.789243 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41c7f13a-589b-496e-9709-e5270fb0e6aa-inventory" (OuterVolumeSpecName: "inventory") pod "41c7f13a-589b-496e-9709-e5270fb0e6aa" (UID: "41c7f13a-589b-496e-9709-e5270fb0e6aa"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:49:56 crc kubenswrapper[4793]: I0127 20:49:56.795383 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41c7f13a-589b-496e-9709-e5270fb0e6aa-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "41c7f13a-589b-496e-9709-e5270fb0e6aa" (UID: "41c7f13a-589b-496e-9709-e5270fb0e6aa"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:49:56 crc kubenswrapper[4793]: I0127 20:49:56.866076 4793 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/41c7f13a-589b-496e-9709-e5270fb0e6aa-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:56 crc kubenswrapper[4793]: I0127 20:49:56.866109 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/41c7f13a-589b-496e-9709-e5270fb0e6aa-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:56 crc kubenswrapper[4793]: I0127 20:49:56.866120 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m4w95\" (UniqueName: \"kubernetes.io/projected/41c7f13a-589b-496e-9709-e5270fb0e6aa-kube-api-access-m4w95\") on node \"crc\" DevicePath \"\"" Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.129430 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5" event={"ID":"41c7f13a-589b-496e-9709-e5270fb0e6aa","Type":"ContainerDied","Data":"d26cd1a9a3e1a4bf54f58a70adf5281561c74da09f22ec5a2096355a69dedbbf"} Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.129479 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d26cd1a9a3e1a4bf54f58a70adf5281561c74da09f22ec5a2096355a69dedbbf" Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.129580 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-cfnm5" Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.271996 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm"] Jan 27 20:49:57 crc kubenswrapper[4793]: E0127 20:49:57.272618 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41c7f13a-589b-496e-9709-e5270fb0e6aa" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.272648 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="41c7f13a-589b-496e-9709-e5270fb0e6aa" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.272890 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="41c7f13a-589b-496e-9709-e5270fb0e6aa" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.273974 4793 util.go:30] "No sandbox for pod can be found. 
Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.277044 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.277114 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.277937 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd"
Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.278572 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.281274 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm"]
Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.293919 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm"
Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.294019 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvqhl\" (UniqueName: \"kubernetes.io/projected/b9480a2b-9979-4554-a98e-143e758ba256-kube-api-access-gvqhl\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm"
Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.294045 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm"
Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.294074 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm"
Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.395892 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm"
Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.396029 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm"
\"kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm" Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.396064 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvqhl\" (UniqueName: \"kubernetes.io/projected/b9480a2b-9979-4554-a98e-143e758ba256-kube-api-access-gvqhl\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm" Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.397992 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm" Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.399514 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm" Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.402273 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm" Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.402906 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm" Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.417991 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvqhl\" (UniqueName: \"kubernetes.io/projected/b9480a2b-9979-4554-a98e-143e758ba256-kube-api-access-gvqhl\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm" Jan 27 20:49:57 crc kubenswrapper[4793]: I0127 20:49:57.597477 4793 util.go:30] "No sandbox for pod can be found. 
Jan 27 20:49:58 crc kubenswrapper[4793]: I0127 20:49:58.172470 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm"]
Jan 27 20:49:59 crc kubenswrapper[4793]: I0127 20:49:59.149013 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm" event={"ID":"b9480a2b-9979-4554-a98e-143e758ba256","Type":"ContainerStarted","Data":"e89b097b622747239a9ce85d2359f82a98c6861c2e5fd2dbf5384dca4dc8fad0"}
Jan 27 20:49:59 crc kubenswrapper[4793]: I0127 20:49:59.149877 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm" event={"ID":"b9480a2b-9979-4554-a98e-143e758ba256","Type":"ContainerStarted","Data":"80f39ab4a355ab4b64df6f8742f58d92b3b49d9eeeac5376675e4813102fd3ea"}
Jan 27 20:49:59 crc kubenswrapper[4793]: I0127 20:49:59.175363 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm" podStartSLOduration=1.732106358 podStartE2EDuration="2.175342161s" podCreationTimestamp="2026-01-27 20:49:57 +0000 UTC" firstStartedPulling="2026-01-27 20:49:58.184204904 +0000 UTC m=+2823.574458060" lastFinishedPulling="2026-01-27 20:49:58.627440707 +0000 UTC m=+2824.017693863" observedRunningTime="2026-01-27 20:49:59.167449733 +0000 UTC m=+2824.557702889" watchObservedRunningTime="2026-01-27 20:49:59.175342161 +0000 UTC m=+2824.565595317"
Jan 27 20:50:02 crc kubenswrapper[4793]: I0127 20:50:02.804045 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542"
Jan 27 20:50:02 crc kubenswrapper[4793]: E0127 20:50:02.804656 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:50:17 crc kubenswrapper[4793]: I0127 20:50:17.804062 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542"
Jan 27 20:50:17 crc kubenswrapper[4793]: E0127 20:50:17.804966 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:50:22 crc kubenswrapper[4793]: I0127 20:50:22.753649 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 20:50:22 crc kubenswrapper[4793]: I0127 20:50:22.754327 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 20:50:31 crc kubenswrapper[4793]: I0127 20:50:31.803365 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542"
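[editor's note] The machine-config-daemon liveness failures above are plain HTTP GETs against http://127.0.0.1:8798/health; a refused connection counts as a failed probe, and after enough consecutive failures the kubelet restarts the container (the restart appears further below at 20:51:22). A hedged approximation of such a check; the httpProbe name is illustrative:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // httpProbe approximates an httpGet liveness check: a 2xx/3xx
    // response is success; connect errors (as in the log) are failures.
    func httpProbe(url string, timeout time.Duration) error {
        client := &http.Client{Timeout: timeout}
        resp, err := client.Get(url)
        if err != nil {
            return err // e.g. "connect: connection refused"
        }
        defer resp.Body.Close()
        if resp.StatusCode >= 400 {
            return fmt.Errorf("unhealthy: status %d", resp.StatusCode)
        }
        return nil
    }

    func main() {
        if err := httpProbe("http://127.0.0.1:8798/health", time.Second); err != nil {
            fmt.Println("liveness probe failed:", err)
        }
    }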
crc kubenswrapper[4793]: I0127 20:50:31.803365 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:50:31 crc kubenswrapper[4793]: E0127 20:50:31.804147 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:50:44 crc kubenswrapper[4793]: I0127 20:50:44.803628 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:50:44 crc kubenswrapper[4793]: E0127 20:50:44.804422 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:50:52 crc kubenswrapper[4793]: I0127 20:50:52.753505 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:50:52 crc kubenswrapper[4793]: I0127 20:50:52.754035 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:50:56 crc kubenswrapper[4793]: I0127 20:50:56.803009 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:50:56 crc kubenswrapper[4793]: E0127 20:50:56.803763 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:51:08 crc kubenswrapper[4793]: I0127 20:51:08.802895 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:51:08 crc kubenswrapper[4793]: E0127 20:51:08.803797 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:51:20 crc kubenswrapper[4793]: I0127 20:51:20.803923 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:51:20 crc kubenswrapper[4793]: E0127 20:51:20.804684 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier 
pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:51:22 crc kubenswrapper[4793]: I0127 20:51:22.753214 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:51:22 crc kubenswrapper[4793]: I0127 20:51:22.753306 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:51:22 crc kubenswrapper[4793]: I0127 20:51:22.753367 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:51:22 crc kubenswrapper[4793]: I0127 20:51:22.754484 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2beb845167fda1987a99f50b0f2e2d57c906953c6b58627f38b3a649c67041ad"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 20:51:22 crc kubenswrapper[4793]: I0127 20:51:22.754576 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://2beb845167fda1987a99f50b0f2e2d57c906953c6b58627f38b3a649c67041ad" gracePeriod=600 Jan 27 20:51:23 crc kubenswrapper[4793]: I0127 20:51:23.001818 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="2beb845167fda1987a99f50b0f2e2d57c906953c6b58627f38b3a649c67041ad" exitCode=0 Jan 27 20:51:23 crc kubenswrapper[4793]: I0127 20:51:23.001909 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"2beb845167fda1987a99f50b0f2e2d57c906953c6b58627f38b3a649c67041ad"} Jan 27 20:51:23 crc kubenswrapper[4793]: I0127 20:51:23.002212 4793 scope.go:117] "RemoveContainer" containerID="78e0a279deb5bc08c8b96c112ab1f2938d97100654bed2baa59e442b808ebb4f" Jan 27 20:51:24 crc kubenswrapper[4793]: I0127 20:51:24.012851 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76"} Jan 27 20:51:31 crc kubenswrapper[4793]: I0127 20:51:31.803982 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:51:31 crc kubenswrapper[4793]: E0127 20:51:31.804857 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" 
pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:51:45 crc kubenswrapper[4793]: I0127 20:51:45.813226 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:51:45 crc kubenswrapper[4793]: E0127 20:51:45.814146 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:51:50 crc kubenswrapper[4793]: I0127 20:51:50.040067 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zzxh2"] Jan 27 20:51:50 crc kubenswrapper[4793]: I0127 20:51:50.043124 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:51:50 crc kubenswrapper[4793]: I0127 20:51:50.053939 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zzxh2"] Jan 27 20:51:50 crc kubenswrapper[4793]: I0127 20:51:50.141438 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37a26820-0899-4980-9253-995894aff4dd-catalog-content\") pod \"community-operators-zzxh2\" (UID: \"37a26820-0899-4980-9253-995894aff4dd\") " pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:51:50 crc kubenswrapper[4793]: I0127 20:51:50.141689 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37a26820-0899-4980-9253-995894aff4dd-utilities\") pod \"community-operators-zzxh2\" (UID: \"37a26820-0899-4980-9253-995894aff4dd\") " pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:51:50 crc kubenswrapper[4793]: I0127 20:51:50.142207 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tqh7\" (UniqueName: \"kubernetes.io/projected/37a26820-0899-4980-9253-995894aff4dd-kube-api-access-4tqh7\") pod \"community-operators-zzxh2\" (UID: \"37a26820-0899-4980-9253-995894aff4dd\") " pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:51:50 crc kubenswrapper[4793]: I0127 20:51:50.243872 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37a26820-0899-4980-9253-995894aff4dd-catalog-content\") pod \"community-operators-zzxh2\" (UID: \"37a26820-0899-4980-9253-995894aff4dd\") " pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:51:50 crc kubenswrapper[4793]: I0127 20:51:50.243945 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37a26820-0899-4980-9253-995894aff4dd-utilities\") pod \"community-operators-zzxh2\" (UID: \"37a26820-0899-4980-9253-995894aff4dd\") " pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:51:50 crc kubenswrapper[4793]: I0127 20:51:50.244029 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tqh7\" (UniqueName: \"kubernetes.io/projected/37a26820-0899-4980-9253-995894aff4dd-kube-api-access-4tqh7\") pod \"community-operators-zzxh2\" (UID: 
\"37a26820-0899-4980-9253-995894aff4dd\") " pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:51:50 crc kubenswrapper[4793]: I0127 20:51:50.244451 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37a26820-0899-4980-9253-995894aff4dd-utilities\") pod \"community-operators-zzxh2\" (UID: \"37a26820-0899-4980-9253-995894aff4dd\") " pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:51:50 crc kubenswrapper[4793]: I0127 20:51:50.244541 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37a26820-0899-4980-9253-995894aff4dd-catalog-content\") pod \"community-operators-zzxh2\" (UID: \"37a26820-0899-4980-9253-995894aff4dd\") " pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:51:50 crc kubenswrapper[4793]: I0127 20:51:50.267962 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tqh7\" (UniqueName: \"kubernetes.io/projected/37a26820-0899-4980-9253-995894aff4dd-kube-api-access-4tqh7\") pod \"community-operators-zzxh2\" (UID: \"37a26820-0899-4980-9253-995894aff4dd\") " pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:51:50 crc kubenswrapper[4793]: I0127 20:51:50.374651 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:51:50 crc kubenswrapper[4793]: I0127 20:51:50.764805 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zzxh2"] Jan 27 20:51:51 crc kubenswrapper[4793]: I0127 20:51:51.333322 4793 generic.go:334] "Generic (PLEG): container finished" podID="37a26820-0899-4980-9253-995894aff4dd" containerID="c4f5ddb6e4846f857a8a0cb0b400ad58a04961459fc759b5770c85e7ad6072da" exitCode=0 Jan 27 20:51:51 crc kubenswrapper[4793]: I0127 20:51:51.333417 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zzxh2" event={"ID":"37a26820-0899-4980-9253-995894aff4dd","Type":"ContainerDied","Data":"c4f5ddb6e4846f857a8a0cb0b400ad58a04961459fc759b5770c85e7ad6072da"} Jan 27 20:51:51 crc kubenswrapper[4793]: I0127 20:51:51.334694 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zzxh2" event={"ID":"37a26820-0899-4980-9253-995894aff4dd","Type":"ContainerStarted","Data":"53074e5a3348310c1104028137671e8eaed4dac35bc20c4e5e21879e08e258df"} Jan 27 20:51:52 crc kubenswrapper[4793]: I0127 20:51:52.344868 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zzxh2" event={"ID":"37a26820-0899-4980-9253-995894aff4dd","Type":"ContainerStarted","Data":"0edd00380b8f768d894f5a572873bccde546587b80d8cbc44dfef7d0c2140db4"} Jan 27 20:51:53 crc kubenswrapper[4793]: I0127 20:51:53.354664 4793 generic.go:334] "Generic (PLEG): container finished" podID="37a26820-0899-4980-9253-995894aff4dd" containerID="0edd00380b8f768d894f5a572873bccde546587b80d8cbc44dfef7d0c2140db4" exitCode=0 Jan 27 20:51:53 crc kubenswrapper[4793]: I0127 20:51:53.354731 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zzxh2" event={"ID":"37a26820-0899-4980-9253-995894aff4dd","Type":"ContainerDied","Data":"0edd00380b8f768d894f5a572873bccde546587b80d8cbc44dfef7d0c2140db4"} Jan 27 20:51:54 crc kubenswrapper[4793]: I0127 20:51:54.367601 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-zzxh2" event={"ID":"37a26820-0899-4980-9253-995894aff4dd","Type":"ContainerStarted","Data":"96ec1bf09d0c50c550f17ce397f36d60f86093b5df094601c96e238eccfe9bc4"} Jan 27 20:51:54 crc kubenswrapper[4793]: I0127 20:51:54.388363 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zzxh2" podStartSLOduration=1.962004262 podStartE2EDuration="4.388343637s" podCreationTimestamp="2026-01-27 20:51:50 +0000 UTC" firstStartedPulling="2026-01-27 20:51:51.335754376 +0000 UTC m=+2936.726007532" lastFinishedPulling="2026-01-27 20:51:53.762093751 +0000 UTC m=+2939.152346907" observedRunningTime="2026-01-27 20:51:54.383930288 +0000 UTC m=+2939.774183444" watchObservedRunningTime="2026-01-27 20:51:54.388343637 +0000 UTC m=+2939.778596793" Jan 27 20:51:57 crc kubenswrapper[4793]: I0127 20:51:57.803928 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:51:57 crc kubenswrapper[4793]: E0127 20:51:57.804743 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:52:00 crc kubenswrapper[4793]: I0127 20:52:00.375772 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:52:00 crc kubenswrapper[4793]: I0127 20:52:00.376270 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:52:00 crc kubenswrapper[4793]: I0127 20:52:00.427407 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:52:00 crc kubenswrapper[4793]: I0127 20:52:00.491726 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:52:00 crc kubenswrapper[4793]: I0127 20:52:00.669486 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zzxh2"] Jan 27 20:52:02 crc kubenswrapper[4793]: I0127 20:52:02.437043 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zzxh2" podUID="37a26820-0899-4980-9253-995894aff4dd" containerName="registry-server" containerID="cri-o://96ec1bf09d0c50c550f17ce397f36d60f86093b5df094601c96e238eccfe9bc4" gracePeriod=2 Jan 27 20:52:02 crc kubenswrapper[4793]: I0127 20:52:02.924173 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.028325 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37a26820-0899-4980-9253-995894aff4dd-utilities\") pod \"37a26820-0899-4980-9253-995894aff4dd\" (UID: \"37a26820-0899-4980-9253-995894aff4dd\") " Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.028409 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4tqh7\" (UniqueName: \"kubernetes.io/projected/37a26820-0899-4980-9253-995894aff4dd-kube-api-access-4tqh7\") pod \"37a26820-0899-4980-9253-995894aff4dd\" (UID: \"37a26820-0899-4980-9253-995894aff4dd\") " Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.029268 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37a26820-0899-4980-9253-995894aff4dd-utilities" (OuterVolumeSpecName: "utilities") pod "37a26820-0899-4980-9253-995894aff4dd" (UID: "37a26820-0899-4980-9253-995894aff4dd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.029936 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37a26820-0899-4980-9253-995894aff4dd-catalog-content\") pod \"37a26820-0899-4980-9253-995894aff4dd\" (UID: \"37a26820-0899-4980-9253-995894aff4dd\") " Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.030495 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37a26820-0899-4980-9253-995894aff4dd-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.035749 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37a26820-0899-4980-9253-995894aff4dd-kube-api-access-4tqh7" (OuterVolumeSpecName: "kube-api-access-4tqh7") pod "37a26820-0899-4980-9253-995894aff4dd" (UID: "37a26820-0899-4980-9253-995894aff4dd"). InnerVolumeSpecName "kube-api-access-4tqh7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.077310 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37a26820-0899-4980-9253-995894aff4dd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "37a26820-0899-4980-9253-995894aff4dd" (UID: "37a26820-0899-4980-9253-995894aff4dd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.133096 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4tqh7\" (UniqueName: \"kubernetes.io/projected/37a26820-0899-4980-9253-995894aff4dd-kube-api-access-4tqh7\") on node \"crc\" DevicePath \"\"" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.133157 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37a26820-0899-4980-9253-995894aff4dd-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.448401 4793 generic.go:334] "Generic (PLEG): container finished" podID="37a26820-0899-4980-9253-995894aff4dd" containerID="96ec1bf09d0c50c550f17ce397f36d60f86093b5df094601c96e238eccfe9bc4" exitCode=0 Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.448424 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zzxh2" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.448438 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zzxh2" event={"ID":"37a26820-0899-4980-9253-995894aff4dd","Type":"ContainerDied","Data":"96ec1bf09d0c50c550f17ce397f36d60f86093b5df094601c96e238eccfe9bc4"} Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.448849 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zzxh2" event={"ID":"37a26820-0899-4980-9253-995894aff4dd","Type":"ContainerDied","Data":"53074e5a3348310c1104028137671e8eaed4dac35bc20c4e5e21879e08e258df"} Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.448898 4793 scope.go:117] "RemoveContainer" containerID="96ec1bf09d0c50c550f17ce397f36d60f86093b5df094601c96e238eccfe9bc4" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.493594 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zzxh2"] Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.499149 4793 scope.go:117] "RemoveContainer" containerID="0edd00380b8f768d894f5a572873bccde546587b80d8cbc44dfef7d0c2140db4" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.503972 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zzxh2"] Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.522596 4793 scope.go:117] "RemoveContainer" containerID="c4f5ddb6e4846f857a8a0cb0b400ad58a04961459fc759b5770c85e7ad6072da" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.574490 4793 scope.go:117] "RemoveContainer" containerID="96ec1bf09d0c50c550f17ce397f36d60f86093b5df094601c96e238eccfe9bc4" Jan 27 20:52:03 crc kubenswrapper[4793]: E0127 20:52:03.576145 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96ec1bf09d0c50c550f17ce397f36d60f86093b5df094601c96e238eccfe9bc4\": container with ID starting with 96ec1bf09d0c50c550f17ce397f36d60f86093b5df094601c96e238eccfe9bc4 not found: ID does not exist" containerID="96ec1bf09d0c50c550f17ce397f36d60f86093b5df094601c96e238eccfe9bc4" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.576191 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96ec1bf09d0c50c550f17ce397f36d60f86093b5df094601c96e238eccfe9bc4"} err="failed to get container status 
\"96ec1bf09d0c50c550f17ce397f36d60f86093b5df094601c96e238eccfe9bc4\": rpc error: code = NotFound desc = could not find container \"96ec1bf09d0c50c550f17ce397f36d60f86093b5df094601c96e238eccfe9bc4\": container with ID starting with 96ec1bf09d0c50c550f17ce397f36d60f86093b5df094601c96e238eccfe9bc4 not found: ID does not exist" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.576218 4793 scope.go:117] "RemoveContainer" containerID="0edd00380b8f768d894f5a572873bccde546587b80d8cbc44dfef7d0c2140db4" Jan 27 20:52:03 crc kubenswrapper[4793]: E0127 20:52:03.576769 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0edd00380b8f768d894f5a572873bccde546587b80d8cbc44dfef7d0c2140db4\": container with ID starting with 0edd00380b8f768d894f5a572873bccde546587b80d8cbc44dfef7d0c2140db4 not found: ID does not exist" containerID="0edd00380b8f768d894f5a572873bccde546587b80d8cbc44dfef7d0c2140db4" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.576791 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0edd00380b8f768d894f5a572873bccde546587b80d8cbc44dfef7d0c2140db4"} err="failed to get container status \"0edd00380b8f768d894f5a572873bccde546587b80d8cbc44dfef7d0c2140db4\": rpc error: code = NotFound desc = could not find container \"0edd00380b8f768d894f5a572873bccde546587b80d8cbc44dfef7d0c2140db4\": container with ID starting with 0edd00380b8f768d894f5a572873bccde546587b80d8cbc44dfef7d0c2140db4 not found: ID does not exist" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.576806 4793 scope.go:117] "RemoveContainer" containerID="c4f5ddb6e4846f857a8a0cb0b400ad58a04961459fc759b5770c85e7ad6072da" Jan 27 20:52:03 crc kubenswrapper[4793]: E0127 20:52:03.577191 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4f5ddb6e4846f857a8a0cb0b400ad58a04961459fc759b5770c85e7ad6072da\": container with ID starting with c4f5ddb6e4846f857a8a0cb0b400ad58a04961459fc759b5770c85e7ad6072da not found: ID does not exist" containerID="c4f5ddb6e4846f857a8a0cb0b400ad58a04961459fc759b5770c85e7ad6072da" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.577245 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4f5ddb6e4846f857a8a0cb0b400ad58a04961459fc759b5770c85e7ad6072da"} err="failed to get container status \"c4f5ddb6e4846f857a8a0cb0b400ad58a04961459fc759b5770c85e7ad6072da\": rpc error: code = NotFound desc = could not find container \"c4f5ddb6e4846f857a8a0cb0b400ad58a04961459fc759b5770c85e7ad6072da\": container with ID starting with c4f5ddb6e4846f857a8a0cb0b400ad58a04961459fc759b5770c85e7ad6072da not found: ID does not exist" Jan 27 20:52:03 crc kubenswrapper[4793]: I0127 20:52:03.814720 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37a26820-0899-4980-9253-995894aff4dd" path="/var/lib/kubelet/pods/37a26820-0899-4980-9253-995894aff4dd/volumes" Jan 27 20:52:11 crc kubenswrapper[4793]: I0127 20:52:11.803435 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:52:11 crc kubenswrapper[4793]: E0127 20:52:11.804152 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier 
pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:52:23 crc kubenswrapper[4793]: I0127 20:52:23.803093 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:52:23 crc kubenswrapper[4793]: E0127 20:52:23.803953 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:52:35 crc kubenswrapper[4793]: I0127 20:52:35.811470 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:52:35 crc kubenswrapper[4793]: E0127 20:52:35.814166 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:52:49 crc kubenswrapper[4793]: I0127 20:52:49.803704 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:52:49 crc kubenswrapper[4793]: E0127 20:52:49.804653 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:53:03 crc kubenswrapper[4793]: I0127 20:53:03.803436 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:53:03 crc kubenswrapper[4793]: E0127 20:53:03.804143 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:53:15 crc kubenswrapper[4793]: I0127 20:53:15.810342 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:53:15 crc kubenswrapper[4793]: E0127 20:53:15.811110 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:53:27 crc kubenswrapper[4793]: I0127 20:53:27.803342 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:53:28 crc kubenswrapper[4793]: I0127 20:53:28.276562 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" 
event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e"} Jan 27 20:53:32 crc kubenswrapper[4793]: I0127 20:53:32.312468 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" exitCode=1 Jan 27 20:53:32 crc kubenswrapper[4793]: I0127 20:53:32.312683 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e"} Jan 27 20:53:32 crc kubenswrapper[4793]: I0127 20:53:32.313075 4793 scope.go:117] "RemoveContainer" containerID="135ab37126187a401205657e404b5880e3691769cb8457e5fe369a40ef1f4542" Jan 27 20:53:32 crc kubenswrapper[4793]: I0127 20:53:32.314290 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:53:32 crc kubenswrapper[4793]: E0127 20:53:32.314629 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:53:33 crc kubenswrapper[4793]: I0127 20:53:33.283137 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 20:53:33 crc kubenswrapper[4793]: I0127 20:53:33.324413 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:53:33 crc kubenswrapper[4793]: E0127 20:53:33.324828 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:53:38 crc kubenswrapper[4793]: I0127 20:53:38.243001 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:53:38 crc kubenswrapper[4793]: I0127 20:53:38.243602 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:53:38 crc kubenswrapper[4793]: I0127 20:53:38.243615 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 20:53:38 crc kubenswrapper[4793]: I0127 20:53:38.244490 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:53:38 crc kubenswrapper[4793]: E0127 20:53:38.244832 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:53:52 crc kubenswrapper[4793]: I0127 20:53:52.753699 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:53:52 crc kubenswrapper[4793]: I0127 20:53:52.754243 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:53:52 crc kubenswrapper[4793]: I0127 20:53:52.804184 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:53:52 crc kubenswrapper[4793]: E0127 20:53:52.804528 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:53:53 crc kubenswrapper[4793]: I0127 20:53:53.522792 4793 generic.go:334] "Generic (PLEG): container finished" podID="b9480a2b-9979-4554-a98e-143e758ba256" containerID="e89b097b622747239a9ce85d2359f82a98c6861c2e5fd2dbf5384dca4dc8fad0" exitCode=0 Jan 27 20:53:53 crc kubenswrapper[4793]: I0127 20:53:53.522853 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm" event={"ID":"b9480a2b-9979-4554-a98e-143e758ba256","Type":"ContainerDied","Data":"e89b097b622747239a9ce85d2359f82a98c6861c2e5fd2dbf5384dca4dc8fad0"} Jan 27 20:53:54 crc kubenswrapper[4793]: I0127 20:53:54.994685 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.165226 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-inventory\") pod \"b9480a2b-9979-4554-a98e-143e758ba256\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.165422 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-bootstrap-combined-ca-bundle\") pod \"b9480a2b-9979-4554-a98e-143e758ba256\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.165455 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvqhl\" (UniqueName: \"kubernetes.io/projected/b9480a2b-9979-4554-a98e-143e758ba256-kube-api-access-gvqhl\") pod \"b9480a2b-9979-4554-a98e-143e758ba256\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.165478 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-ssh-key-openstack-edpm-ipam\") pod \"b9480a2b-9979-4554-a98e-143e758ba256\" (UID: \"b9480a2b-9979-4554-a98e-143e758ba256\") " Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.171101 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9480a2b-9979-4554-a98e-143e758ba256-kube-api-access-gvqhl" (OuterVolumeSpecName: "kube-api-access-gvqhl") pod "b9480a2b-9979-4554-a98e-143e758ba256" (UID: "b9480a2b-9979-4554-a98e-143e758ba256"). InnerVolumeSpecName "kube-api-access-gvqhl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.171671 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "b9480a2b-9979-4554-a98e-143e758ba256" (UID: "b9480a2b-9979-4554-a98e-143e758ba256"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.197365 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-inventory" (OuterVolumeSpecName: "inventory") pod "b9480a2b-9979-4554-a98e-143e758ba256" (UID: "b9480a2b-9979-4554-a98e-143e758ba256"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.197742 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "b9480a2b-9979-4554-a98e-143e758ba256" (UID: "b9480a2b-9979-4554-a98e-143e758ba256"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.268476 4793 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.268904 4793 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.268921 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvqhl\" (UniqueName: \"kubernetes.io/projected/b9480a2b-9979-4554-a98e-143e758ba256-kube-api-access-gvqhl\") on node \"crc\" DevicePath \"\"" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.268929 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b9480a2b-9979-4554-a98e-143e758ba256-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.539694 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm" event={"ID":"b9480a2b-9979-4554-a98e-143e758ba256","Type":"ContainerDied","Data":"80f39ab4a355ab4b64df6f8742f58d92b3b49d9eeeac5376675e4813102fd3ea"} Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.539744 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80f39ab4a355ab4b64df6f8742f58d92b3b49d9eeeac5376675e4813102fd3ea" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.539742 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.658379 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr"] Jan 27 20:53:55 crc kubenswrapper[4793]: E0127 20:53:55.659055 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37a26820-0899-4980-9253-995894aff4dd" containerName="extract-content" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.659081 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="37a26820-0899-4980-9253-995894aff4dd" containerName="extract-content" Jan 27 20:53:55 crc kubenswrapper[4793]: E0127 20:53:55.659124 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37a26820-0899-4980-9253-995894aff4dd" containerName="extract-utilities" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.659134 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="37a26820-0899-4980-9253-995894aff4dd" containerName="extract-utilities" Jan 27 20:53:55 crc kubenswrapper[4793]: E0127 20:53:55.659157 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37a26820-0899-4980-9253-995894aff4dd" containerName="registry-server" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.659166 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="37a26820-0899-4980-9253-995894aff4dd" containerName="registry-server" Jan 27 20:53:55 crc kubenswrapper[4793]: E0127 20:53:55.659183 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9480a2b-9979-4554-a98e-143e758ba256" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.659193 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9480a2b-9979-4554-a98e-143e758ba256" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.659494 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9480a2b-9979-4554-a98e-143e758ba256" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.659524 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="37a26820-0899-4980-9253-995894aff4dd" containerName="registry-server" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.660512 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.662320 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.662638 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.663233 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.663675 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.936114 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr"] Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.941870 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2ac35f24-57b8-4521-8509-5adc5ae84b60-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr\" (UID: \"2ac35f24-57b8-4521-8509-5adc5ae84b60\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.941979 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2ac35f24-57b8-4521-8509-5adc5ae84b60-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr\" (UID: \"2ac35f24-57b8-4521-8509-5adc5ae84b60\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr" Jan 27 20:53:55 crc kubenswrapper[4793]: I0127 20:53:55.942071 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t26dd\" (UniqueName: \"kubernetes.io/projected/2ac35f24-57b8-4521-8509-5adc5ae84b60-kube-api-access-t26dd\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr\" (UID: \"2ac35f24-57b8-4521-8509-5adc5ae84b60\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr" Jan 27 20:53:56 crc kubenswrapper[4793]: I0127 20:53:56.043196 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2ac35f24-57b8-4521-8509-5adc5ae84b60-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr\" (UID: \"2ac35f24-57b8-4521-8509-5adc5ae84b60\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr" Jan 27 20:53:56 crc kubenswrapper[4793]: I0127 20:53:56.043361 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t26dd\" (UniqueName: \"kubernetes.io/projected/2ac35f24-57b8-4521-8509-5adc5ae84b60-kube-api-access-t26dd\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr\" (UID: \"2ac35f24-57b8-4521-8509-5adc5ae84b60\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr" Jan 27 20:53:56 crc kubenswrapper[4793]: I0127 20:53:56.043525 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/2ac35f24-57b8-4521-8509-5adc5ae84b60-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr\" (UID: \"2ac35f24-57b8-4521-8509-5adc5ae84b60\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr" Jan 27 20:53:56 crc kubenswrapper[4793]: I0127 20:53:56.047001 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 20:53:56 crc kubenswrapper[4793]: I0127 20:53:56.047421 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 20:53:56 crc kubenswrapper[4793]: I0127 20:53:56.059975 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2ac35f24-57b8-4521-8509-5adc5ae84b60-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr\" (UID: \"2ac35f24-57b8-4521-8509-5adc5ae84b60\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr" Jan 27 20:53:56 crc kubenswrapper[4793]: I0127 20:53:56.060575 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t26dd\" (UniqueName: \"kubernetes.io/projected/2ac35f24-57b8-4521-8509-5adc5ae84b60-kube-api-access-t26dd\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr\" (UID: \"2ac35f24-57b8-4521-8509-5adc5ae84b60\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr" Jan 27 20:53:56 crc kubenswrapper[4793]: I0127 20:53:56.061166 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2ac35f24-57b8-4521-8509-5adc5ae84b60-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr\" (UID: \"2ac35f24-57b8-4521-8509-5adc5ae84b60\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr" Jan 27 20:53:56 crc kubenswrapper[4793]: I0127 20:53:56.234912 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd" Jan 27 20:53:56 crc kubenswrapper[4793]: I0127 20:53:56.243482 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr" Jan 27 20:53:56 crc kubenswrapper[4793]: I0127 20:53:56.805193 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr"] Jan 27 20:53:57 crc kubenswrapper[4793]: I0127 20:53:57.559619 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr" event={"ID":"2ac35f24-57b8-4521-8509-5adc5ae84b60","Type":"ContainerStarted","Data":"133a05f150405977c03aa1c2965abb33600ca101d3d2273aada08e0b77ec1977"} Jan 27 20:53:57 crc kubenswrapper[4793]: I0127 20:53:57.560651 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 20:53:58 crc kubenswrapper[4793]: I0127 20:53:58.584976 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr" event={"ID":"2ac35f24-57b8-4521-8509-5adc5ae84b60","Type":"ContainerStarted","Data":"b846bf496b3fed2109a54a9a08b57b9837fc63f05f51aa764b0e86d90aa594ad"} Jan 27 20:53:58 crc kubenswrapper[4793]: I0127 20:53:58.612625 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr" podStartSLOduration=2.868349883 podStartE2EDuration="3.612599308s" podCreationTimestamp="2026-01-27 20:53:55 +0000 UTC" firstStartedPulling="2026-01-27 20:53:56.813652452 +0000 UTC m=+3062.203905618" lastFinishedPulling="2026-01-27 20:53:57.557901887 +0000 UTC m=+3062.948155043" observedRunningTime="2026-01-27 20:53:58.603111282 +0000 UTC m=+3063.993364438" watchObservedRunningTime="2026-01-27 20:53:58.612599308 +0000 UTC m=+3064.002852474" Jan 27 20:54:05 crc kubenswrapper[4793]: I0127 20:54:05.808867 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:54:05 crc kubenswrapper[4793]: E0127 20:54:05.809596 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:54:20 crc kubenswrapper[4793]: I0127 20:54:20.804026 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:54:20 crc kubenswrapper[4793]: E0127 20:54:20.805131 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:54:22 crc kubenswrapper[4793]: I0127 20:54:22.753270 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:54:22 crc kubenswrapper[4793]: I0127 20:54:22.753604 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" 
podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:54:31 crc kubenswrapper[4793]: I0127 20:54:31.804009 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:54:31 crc kubenswrapper[4793]: E0127 20:54:31.804815 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:54:44 crc kubenswrapper[4793]: I0127 20:54:44.803439 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:54:44 crc kubenswrapper[4793]: E0127 20:54:44.804192 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:54:52 crc kubenswrapper[4793]: I0127 20:54:52.753231 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 20:54:52 crc kubenswrapper[4793]: I0127 20:54:52.753895 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 20:54:52 crc kubenswrapper[4793]: I0127 20:54:52.753975 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 20:54:52 crc kubenswrapper[4793]: I0127 20:54:52.755015 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 20:54:52 crc kubenswrapper[4793]: I0127 20:54:52.755080 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" gracePeriod=600 Jan 27 20:54:52 crc kubenswrapper[4793]: E0127 20:54:52.874290 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:54:53 crc kubenswrapper[4793]: I0127 20:54:53.142761 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" exitCode=0 Jan 27 20:54:53 crc kubenswrapper[4793]: I0127 20:54:53.142807 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76"} Jan 27 20:54:53 crc kubenswrapper[4793]: I0127 20:54:53.142841 4793 scope.go:117] "RemoveContainer" containerID="2beb845167fda1987a99f50b0f2e2d57c906953c6b58627f38b3a649c67041ad" Jan 27 20:54:53 crc kubenswrapper[4793]: I0127 20:54:53.143659 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 20:54:53 crc kubenswrapper[4793]: E0127 20:54:53.144006 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:54:57 crc kubenswrapper[4793]: I0127 20:54:57.803909 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:54:57 crc kubenswrapper[4793]: E0127 20:54:57.804926 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:55:08 crc kubenswrapper[4793]: I0127 20:55:08.803836 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 20:55:08 crc kubenswrapper[4793]: E0127 20:55:08.804568 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:55:12 crc kubenswrapper[4793]: I0127 20:55:12.803870 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:55:12 crc kubenswrapper[4793]: E0127 20:55:12.806984 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:55:21 crc kubenswrapper[4793]: I0127 20:55:21.804116 4793 scope.go:117] "RemoveContainer" 
containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 20:55:21 crc kubenswrapper[4793]: E0127 20:55:21.805217 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:55:24 crc kubenswrapper[4793]: I0127 20:55:24.803663 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:55:24 crc kubenswrapper[4793]: E0127 20:55:24.804745 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:55:29 crc kubenswrapper[4793]: I0127 20:55:29.083355 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xpqvz"] Jan 27 20:55:29 crc kubenswrapper[4793]: I0127 20:55:29.085996 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:29 crc kubenswrapper[4793]: I0127 20:55:29.095638 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xpqvz"] Jan 27 20:55:29 crc kubenswrapper[4793]: I0127 20:55:29.256956 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70571f96-002a-4672-a83d-8d6478709c30-catalog-content\") pod \"redhat-marketplace-xpqvz\" (UID: \"70571f96-002a-4672-a83d-8d6478709c30\") " pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:29 crc kubenswrapper[4793]: I0127 20:55:29.257179 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70571f96-002a-4672-a83d-8d6478709c30-utilities\") pod \"redhat-marketplace-xpqvz\" (UID: \"70571f96-002a-4672-a83d-8d6478709c30\") " pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:29 crc kubenswrapper[4793]: I0127 20:55:29.257240 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fw2dc\" (UniqueName: \"kubernetes.io/projected/70571f96-002a-4672-a83d-8d6478709c30-kube-api-access-fw2dc\") pod \"redhat-marketplace-xpqvz\" (UID: \"70571f96-002a-4672-a83d-8d6478709c30\") " pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:29 crc kubenswrapper[4793]: I0127 20:55:29.359223 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70571f96-002a-4672-a83d-8d6478709c30-utilities\") pod \"redhat-marketplace-xpqvz\" (UID: \"70571f96-002a-4672-a83d-8d6478709c30\") " pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:29 crc kubenswrapper[4793]: I0127 20:55:29.359330 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fw2dc\" (UniqueName: 
\"kubernetes.io/projected/70571f96-002a-4672-a83d-8d6478709c30-kube-api-access-fw2dc\") pod \"redhat-marketplace-xpqvz\" (UID: \"70571f96-002a-4672-a83d-8d6478709c30\") " pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:29 crc kubenswrapper[4793]: I0127 20:55:29.359401 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70571f96-002a-4672-a83d-8d6478709c30-catalog-content\") pod \"redhat-marketplace-xpqvz\" (UID: \"70571f96-002a-4672-a83d-8d6478709c30\") " pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:29 crc kubenswrapper[4793]: I0127 20:55:29.359908 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70571f96-002a-4672-a83d-8d6478709c30-utilities\") pod \"redhat-marketplace-xpqvz\" (UID: \"70571f96-002a-4672-a83d-8d6478709c30\") " pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:29 crc kubenswrapper[4793]: I0127 20:55:29.360005 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70571f96-002a-4672-a83d-8d6478709c30-catalog-content\") pod \"redhat-marketplace-xpqvz\" (UID: \"70571f96-002a-4672-a83d-8d6478709c30\") " pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:29 crc kubenswrapper[4793]: I0127 20:55:29.386088 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fw2dc\" (UniqueName: \"kubernetes.io/projected/70571f96-002a-4672-a83d-8d6478709c30-kube-api-access-fw2dc\") pod \"redhat-marketplace-xpqvz\" (UID: \"70571f96-002a-4672-a83d-8d6478709c30\") " pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:29 crc kubenswrapper[4793]: I0127 20:55:29.411070 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:29 crc kubenswrapper[4793]: I0127 20:55:29.981510 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xpqvz"] Jan 27 20:55:29 crc kubenswrapper[4793]: W0127 20:55:29.984133 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod70571f96_002a_4672_a83d_8d6478709c30.slice/crio-b0809b650fa96f56a9785df443d7a38f26addbcae1541612e5eed12107528d02 WatchSource:0}: Error finding container b0809b650fa96f56a9785df443d7a38f26addbcae1541612e5eed12107528d02: Status 404 returned error can't find the container with id b0809b650fa96f56a9785df443d7a38f26addbcae1541612e5eed12107528d02 Jan 27 20:55:30 crc kubenswrapper[4793]: I0127 20:55:30.506048 4793 generic.go:334] "Generic (PLEG): container finished" podID="70571f96-002a-4672-a83d-8d6478709c30" containerID="7636f4ca718c5646d02c1edbf625188dd9ea418e72c1045d29d905e32c11e3a5" exitCode=0 Jan 27 20:55:30 crc kubenswrapper[4793]: I0127 20:55:30.506152 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xpqvz" event={"ID":"70571f96-002a-4672-a83d-8d6478709c30","Type":"ContainerDied","Data":"7636f4ca718c5646d02c1edbf625188dd9ea418e72c1045d29d905e32c11e3a5"} Jan 27 20:55:30 crc kubenswrapper[4793]: I0127 20:55:30.506353 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xpqvz" event={"ID":"70571f96-002a-4672-a83d-8d6478709c30","Type":"ContainerStarted","Data":"b0809b650fa96f56a9785df443d7a38f26addbcae1541612e5eed12107528d02"} Jan 27 20:55:30 crc kubenswrapper[4793]: I0127 20:55:30.508492 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 20:55:31 crc kubenswrapper[4793]: I0127 20:55:31.520078 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xpqvz" event={"ID":"70571f96-002a-4672-a83d-8d6478709c30","Type":"ContainerStarted","Data":"f0aab414d94a258ff20ffb0ee2a1146bd7bb09941583299e3d91a6ac93933468"} Jan 27 20:55:31 crc kubenswrapper[4793]: E0127 20:55:31.788527 4793 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod70571f96_002a_4672_a83d_8d6478709c30.slice/crio-f0aab414d94a258ff20ffb0ee2a1146bd7bb09941583299e3d91a6ac93933468.scope\": RecentStats: unable to find data in memory cache]" Jan 27 20:55:32 crc kubenswrapper[4793]: I0127 20:55:32.531045 4793 generic.go:334] "Generic (PLEG): container finished" podID="70571f96-002a-4672-a83d-8d6478709c30" containerID="f0aab414d94a258ff20ffb0ee2a1146bd7bb09941583299e3d91a6ac93933468" exitCode=0 Jan 27 20:55:32 crc kubenswrapper[4793]: I0127 20:55:32.531105 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xpqvz" event={"ID":"70571f96-002a-4672-a83d-8d6478709c30","Type":"ContainerDied","Data":"f0aab414d94a258ff20ffb0ee2a1146bd7bb09941583299e3d91a6ac93933468"} Jan 27 20:55:33 crc kubenswrapper[4793]: I0127 20:55:33.544073 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xpqvz" event={"ID":"70571f96-002a-4672-a83d-8d6478709c30","Type":"ContainerStarted","Data":"3ece81f263c972747ab00a321a1e44fbbf89c19445026063b98995be42f0d074"} Jan 27 20:55:33 crc kubenswrapper[4793]: I0127 20:55:33.564802 4793 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xpqvz" podStartSLOduration=1.852872412 podStartE2EDuration="4.564781418s" podCreationTimestamp="2026-01-27 20:55:29 +0000 UTC" firstStartedPulling="2026-01-27 20:55:30.508235589 +0000 UTC m=+3155.898488735" lastFinishedPulling="2026-01-27 20:55:33.220144585 +0000 UTC m=+3158.610397741" observedRunningTime="2026-01-27 20:55:33.560067083 +0000 UTC m=+3158.950320249" watchObservedRunningTime="2026-01-27 20:55:33.564781418 +0000 UTC m=+3158.955034574" Jan 27 20:55:35 crc kubenswrapper[4793]: I0127 20:55:35.815888 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 20:55:35 crc kubenswrapper[4793]: E0127 20:55:35.816574 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:55:36 crc kubenswrapper[4793]: I0127 20:55:36.803711 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:55:36 crc kubenswrapper[4793]: E0127 20:55:36.804098 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:55:39 crc kubenswrapper[4793]: I0127 20:55:39.412032 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:39 crc kubenswrapper[4793]: I0127 20:55:39.413746 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:39 crc kubenswrapper[4793]: I0127 20:55:39.462203 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:39 crc kubenswrapper[4793]: I0127 20:55:39.677148 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:39 crc kubenswrapper[4793]: I0127 20:55:39.753192 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xpqvz"] Jan 27 20:55:41 crc kubenswrapper[4793]: I0127 20:55:41.624742 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xpqvz" podUID="70571f96-002a-4672-a83d-8d6478709c30" containerName="registry-server" containerID="cri-o://3ece81f263c972747ab00a321a1e44fbbf89c19445026063b98995be42f0d074" gracePeriod=2 Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.112979 4793 util.go:48] "No ready sandbox for pod can be found. 
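The "Observed pod startup duration" entry above embeds Go time.Time values that carry a monotonic clock suffix ("m=+..."). The sketch below recovers the image-pull window for redhat-marketplace-xpqvz from two of those fields; the layout string is an assumption based on Go's default time formatting, and the helper name is hypothetical:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

// parseKubeletTime parses timestamps as they appear in the latency entries
// above, e.g. "2026-01-27 20:55:30.508235589 +0000 UTC m=+3155.898488735".
func parseKubeletTime(s string) (time.Time, error) {
	// Drop the monotonic reading; time.Parse does not accept it.
	if i := strings.Index(s, " m="); i >= 0 {
		s = s[:i]
	}
	return time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
}

func main() {
	first, _ := parseKubeletTime("2026-01-27 20:55:30.508235589 +0000 UTC m=+3155.898488735")
	last, _ := parseKubeletTime("2026-01-27 20:55:33.220144585 +0000 UTC m=+3158.610397741")
	// Roughly 2.71s of the 4.56s end-to-end startup was spent pulling images.
	fmt.Println("image pull window:", last.Sub(first))
}
```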
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.271798 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fw2dc\" (UniqueName: \"kubernetes.io/projected/70571f96-002a-4672-a83d-8d6478709c30-kube-api-access-fw2dc\") pod \"70571f96-002a-4672-a83d-8d6478709c30\" (UID: \"70571f96-002a-4672-a83d-8d6478709c30\") " Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.272085 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70571f96-002a-4672-a83d-8d6478709c30-utilities\") pod \"70571f96-002a-4672-a83d-8d6478709c30\" (UID: \"70571f96-002a-4672-a83d-8d6478709c30\") " Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.272515 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70571f96-002a-4672-a83d-8d6478709c30-catalog-content\") pod \"70571f96-002a-4672-a83d-8d6478709c30\" (UID: \"70571f96-002a-4672-a83d-8d6478709c30\") " Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.272785 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70571f96-002a-4672-a83d-8d6478709c30-utilities" (OuterVolumeSpecName: "utilities") pod "70571f96-002a-4672-a83d-8d6478709c30" (UID: "70571f96-002a-4672-a83d-8d6478709c30"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.273184 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70571f96-002a-4672-a83d-8d6478709c30-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.277863 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70571f96-002a-4672-a83d-8d6478709c30-kube-api-access-fw2dc" (OuterVolumeSpecName: "kube-api-access-fw2dc") pod "70571f96-002a-4672-a83d-8d6478709c30" (UID: "70571f96-002a-4672-a83d-8d6478709c30"). InnerVolumeSpecName "kube-api-access-fw2dc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.293696 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70571f96-002a-4672-a83d-8d6478709c30-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "70571f96-002a-4672-a83d-8d6478709c30" (UID: "70571f96-002a-4672-a83d-8d6478709c30"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.375504 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70571f96-002a-4672-a83d-8d6478709c30-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.375575 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fw2dc\" (UniqueName: \"kubernetes.io/projected/70571f96-002a-4672-a83d-8d6478709c30-kube-api-access-fw2dc\") on node \"crc\" DevicePath \"\"" Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.653737 4793 generic.go:334] "Generic (PLEG): container finished" podID="70571f96-002a-4672-a83d-8d6478709c30" containerID="3ece81f263c972747ab00a321a1e44fbbf89c19445026063b98995be42f0d074" exitCode=0 Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.653793 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xpqvz" event={"ID":"70571f96-002a-4672-a83d-8d6478709c30","Type":"ContainerDied","Data":"3ece81f263c972747ab00a321a1e44fbbf89c19445026063b98995be42f0d074"} Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.653827 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xpqvz" event={"ID":"70571f96-002a-4672-a83d-8d6478709c30","Type":"ContainerDied","Data":"b0809b650fa96f56a9785df443d7a38f26addbcae1541612e5eed12107528d02"} Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.653849 4793 scope.go:117] "RemoveContainer" containerID="3ece81f263c972747ab00a321a1e44fbbf89c19445026063b98995be42f0d074" Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.653909 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xpqvz" Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.689339 4793 scope.go:117] "RemoveContainer" containerID="f0aab414d94a258ff20ffb0ee2a1146bd7bb09941583299e3d91a6ac93933468" Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.706463 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xpqvz"] Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.716070 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xpqvz"] Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.729251 4793 scope.go:117] "RemoveContainer" containerID="7636f4ca718c5646d02c1edbf625188dd9ea418e72c1045d29d905e32c11e3a5" Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.813779 4793 scope.go:117] "RemoveContainer" containerID="3ece81f263c972747ab00a321a1e44fbbf89c19445026063b98995be42f0d074" Jan 27 20:55:42 crc kubenswrapper[4793]: E0127 20:55:42.814374 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ece81f263c972747ab00a321a1e44fbbf89c19445026063b98995be42f0d074\": container with ID starting with 3ece81f263c972747ab00a321a1e44fbbf89c19445026063b98995be42f0d074 not found: ID does not exist" containerID="3ece81f263c972747ab00a321a1e44fbbf89c19445026063b98995be42f0d074" Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.814422 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ece81f263c972747ab00a321a1e44fbbf89c19445026063b98995be42f0d074"} err="failed to get container status \"3ece81f263c972747ab00a321a1e44fbbf89c19445026063b98995be42f0d074\": rpc error: code = NotFound desc = could not find container \"3ece81f263c972747ab00a321a1e44fbbf89c19445026063b98995be42f0d074\": container with ID starting with 3ece81f263c972747ab00a321a1e44fbbf89c19445026063b98995be42f0d074 not found: ID does not exist" Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.814450 4793 scope.go:117] "RemoveContainer" containerID="f0aab414d94a258ff20ffb0ee2a1146bd7bb09941583299e3d91a6ac93933468" Jan 27 20:55:42 crc kubenswrapper[4793]: E0127 20:55:42.814894 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0aab414d94a258ff20ffb0ee2a1146bd7bb09941583299e3d91a6ac93933468\": container with ID starting with f0aab414d94a258ff20ffb0ee2a1146bd7bb09941583299e3d91a6ac93933468 not found: ID does not exist" containerID="f0aab414d94a258ff20ffb0ee2a1146bd7bb09941583299e3d91a6ac93933468" Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.814950 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0aab414d94a258ff20ffb0ee2a1146bd7bb09941583299e3d91a6ac93933468"} err="failed to get container status \"f0aab414d94a258ff20ffb0ee2a1146bd7bb09941583299e3d91a6ac93933468\": rpc error: code = NotFound desc = could not find container \"f0aab414d94a258ff20ffb0ee2a1146bd7bb09941583299e3d91a6ac93933468\": container with ID starting with f0aab414d94a258ff20ffb0ee2a1146bd7bb09941583299e3d91a6ac93933468 not found: ID does not exist" Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.814978 4793 scope.go:117] "RemoveContainer" containerID="7636f4ca718c5646d02c1edbf625188dd9ea418e72c1045d29d905e32c11e3a5" Jan 27 20:55:42 crc kubenswrapper[4793]: E0127 20:55:42.816888 4793 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"7636f4ca718c5646d02c1edbf625188dd9ea418e72c1045d29d905e32c11e3a5\": container with ID starting with 7636f4ca718c5646d02c1edbf625188dd9ea418e72c1045d29d905e32c11e3a5 not found: ID does not exist" containerID="7636f4ca718c5646d02c1edbf625188dd9ea418e72c1045d29d905e32c11e3a5" Jan 27 20:55:42 crc kubenswrapper[4793]: I0127 20:55:42.816920 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7636f4ca718c5646d02c1edbf625188dd9ea418e72c1045d29d905e32c11e3a5"} err="failed to get container status \"7636f4ca718c5646d02c1edbf625188dd9ea418e72c1045d29d905e32c11e3a5\": rpc error: code = NotFound desc = could not find container \"7636f4ca718c5646d02c1edbf625188dd9ea418e72c1045d29d905e32c11e3a5\": container with ID starting with 7636f4ca718c5646d02c1edbf625188dd9ea418e72c1045d29d905e32c11e3a5 not found: ID does not exist" Jan 27 20:55:43 crc kubenswrapper[4793]: I0127 20:55:43.816891 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70571f96-002a-4672-a83d-8d6478709c30" path="/var/lib/kubelet/pods/70571f96-002a-4672-a83d-8d6478709c30/volumes" Jan 27 20:55:46 crc kubenswrapper[4793]: I0127 20:55:46.803228 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 20:55:46 crc kubenswrapper[4793]: E0127 20:55:46.803815 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:55:51 crc kubenswrapper[4793]: I0127 20:55:51.802964 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:55:51 crc kubenswrapper[4793]: E0127 20:55:51.804928 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:56:01 crc kubenswrapper[4793]: I0127 20:56:01.804232 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 20:56:01 crc kubenswrapper[4793]: E0127 20:56:01.806265 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:56:02 crc kubenswrapper[4793]: I0127 20:56:02.817490 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:56:02 crc kubenswrapper[4793]: E0127 20:56:02.818002 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
Jan 27 20:56:02 crc kubenswrapper[4793]: E0127 20:56:02.818002 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:56:06 crc kubenswrapper[4793]: I0127 20:56:06.877495 4793 generic.go:334] "Generic (PLEG): container finished" podID="2ac35f24-57b8-4521-8509-5adc5ae84b60" containerID="b846bf496b3fed2109a54a9a08b57b9837fc63f05f51aa764b0e86d90aa594ad" exitCode=0
Jan 27 20:56:06 crc kubenswrapper[4793]: I0127 20:56:06.877588 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr" event={"ID":"2ac35f24-57b8-4521-8509-5adc5ae84b60","Type":"ContainerDied","Data":"b846bf496b3fed2109a54a9a08b57b9837fc63f05f51aa764b0e86d90aa594ad"}
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.358019 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr"
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.429136 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t26dd\" (UniqueName: \"kubernetes.io/projected/2ac35f24-57b8-4521-8509-5adc5ae84b60-kube-api-access-t26dd\") pod \"2ac35f24-57b8-4521-8509-5adc5ae84b60\" (UID: \"2ac35f24-57b8-4521-8509-5adc5ae84b60\") "
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.429210 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2ac35f24-57b8-4521-8509-5adc5ae84b60-inventory\") pod \"2ac35f24-57b8-4521-8509-5adc5ae84b60\" (UID: \"2ac35f24-57b8-4521-8509-5adc5ae84b60\") "
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.429255 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2ac35f24-57b8-4521-8509-5adc5ae84b60-ssh-key-openstack-edpm-ipam\") pod \"2ac35f24-57b8-4521-8509-5adc5ae84b60\" (UID: \"2ac35f24-57b8-4521-8509-5adc5ae84b60\") "
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.436106 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ac35f24-57b8-4521-8509-5adc5ae84b60-kube-api-access-t26dd" (OuterVolumeSpecName: "kube-api-access-t26dd") pod "2ac35f24-57b8-4521-8509-5adc5ae84b60" (UID: "2ac35f24-57b8-4521-8509-5adc5ae84b60"). InnerVolumeSpecName "kube-api-access-t26dd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.458013 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ac35f24-57b8-4521-8509-5adc5ae84b60-inventory" (OuterVolumeSpecName: "inventory") pod "2ac35f24-57b8-4521-8509-5adc5ae84b60" (UID: "2ac35f24-57b8-4521-8509-5adc5ae84b60"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.463484 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ac35f24-57b8-4521-8509-5adc5ae84b60-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "2ac35f24-57b8-4521-8509-5adc5ae84b60" (UID: "2ac35f24-57b8-4521-8509-5adc5ae84b60"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.533354 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t26dd\" (UniqueName: \"kubernetes.io/projected/2ac35f24-57b8-4521-8509-5adc5ae84b60-kube-api-access-t26dd\") on node \"crc\" DevicePath \"\""
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.533403 4793 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2ac35f24-57b8-4521-8509-5adc5ae84b60-inventory\") on node \"crc\" DevicePath \"\""
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.533413 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2ac35f24-57b8-4521-8509-5adc5ae84b60-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.897109 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr" event={"ID":"2ac35f24-57b8-4521-8509-5adc5ae84b60","Type":"ContainerDied","Data":"133a05f150405977c03aa1c2965abb33600ca101d3d2273aada08e0b77ec1977"}
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.897438 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="133a05f150405977c03aa1c2965abb33600ca101d3d2273aada08e0b77ec1977"
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.897503 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr"
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.998238 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5"]
Jan 27 20:56:08 crc kubenswrapper[4793]: E0127 20:56:08.998657 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ac35f24-57b8-4521-8509-5adc5ae84b60" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.998673 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ac35f24-57b8-4521-8509-5adc5ae84b60" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Jan 27 20:56:08 crc kubenswrapper[4793]: E0127 20:56:08.998692 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70571f96-002a-4672-a83d-8d6478709c30" containerName="registry-server"
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.998699 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="70571f96-002a-4672-a83d-8d6478709c30" containerName="registry-server"
Jan 27 20:56:08 crc kubenswrapper[4793]: E0127 20:56:08.998716 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70571f96-002a-4672-a83d-8d6478709c30" containerName="extract-utilities"
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.998722 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="70571f96-002a-4672-a83d-8d6478709c30" containerName="extract-utilities"
Jan 27 20:56:08 crc kubenswrapper[4793]: E0127 20:56:08.998748 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70571f96-002a-4672-a83d-8d6478709c30" containerName="extract-content"
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.998754 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="70571f96-002a-4672-a83d-8d6478709c30" containerName="extract-content"
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.998952 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ac35f24-57b8-4521-8509-5adc5ae84b60" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.998964 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="70571f96-002a-4672-a83d-8d6478709c30" containerName="registry-server"
Jan 27 20:56:08 crc kubenswrapper[4793]: I0127 20:56:08.999647 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5"
Jan 27 20:56:09 crc kubenswrapper[4793]: I0127 20:56:09.001280 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 27 20:56:09 crc kubenswrapper[4793]: I0127 20:56:09.001568 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd"
Jan 27 20:56:09 crc kubenswrapper[4793]: I0127 20:56:09.002586 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 27 20:56:09 crc kubenswrapper[4793]: I0127 20:56:09.002899 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 27 20:56:09 crc kubenswrapper[4793]: I0127 20:56:09.010063 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5"]
Jan 27 20:56:09 crc kubenswrapper[4793]: I0127 20:56:09.041703 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kmnw\" (UniqueName: \"kubernetes.io/projected/411ebcf6-5cec-4604-9a7c-2f3c720296d6-kube-api-access-2kmnw\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5\" (UID: \"411ebcf6-5cec-4604-9a7c-2f3c720296d6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5"
Jan 27 20:56:09 crc kubenswrapper[4793]: I0127 20:56:09.041827 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/411ebcf6-5cec-4604-9a7c-2f3c720296d6-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5\" (UID: \"411ebcf6-5cec-4604-9a7c-2f3c720296d6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5"
Jan 27 20:56:09 crc kubenswrapper[4793]: I0127 20:56:09.041932 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/411ebcf6-5cec-4604-9a7c-2f3c720296d6-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5\" (UID: \"411ebcf6-5cec-4604-9a7c-2f3c720296d6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5"
Jan 27 20:56:09 crc kubenswrapper[4793]: I0127 20:56:09.144205 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/411ebcf6-5cec-4604-9a7c-2f3c720296d6-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5\" (UID: \"411ebcf6-5cec-4604-9a7c-2f3c720296d6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5"
Jan 27 20:56:09 crc kubenswrapper[4793]: I0127 20:56:09.144407 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kmnw\" (UniqueName: \"kubernetes.io/projected/411ebcf6-5cec-4604-9a7c-2f3c720296d6-kube-api-access-2kmnw\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5\" (UID: \"411ebcf6-5cec-4604-9a7c-2f3c720296d6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5"
Jan 27 20:56:09 crc kubenswrapper[4793]: I0127 20:56:09.144452 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/411ebcf6-5cec-4604-9a7c-2f3c720296d6-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5\" (UID: \"411ebcf6-5cec-4604-9a7c-2f3c720296d6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5"
Jan 27 20:56:09 crc kubenswrapper[4793]: I0127 20:56:09.148986 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/411ebcf6-5cec-4604-9a7c-2f3c720296d6-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5\" (UID: \"411ebcf6-5cec-4604-9a7c-2f3c720296d6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5"
Jan 27 20:56:09 crc kubenswrapper[4793]: I0127 20:56:09.149237 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/411ebcf6-5cec-4604-9a7c-2f3c720296d6-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5\" (UID: \"411ebcf6-5cec-4604-9a7c-2f3c720296d6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5"
Jan 27 20:56:09 crc kubenswrapper[4793]: I0127 20:56:09.160740 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kmnw\" (UniqueName: \"kubernetes.io/projected/411ebcf6-5cec-4604-9a7c-2f3c720296d6-kube-api-access-2kmnw\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5\" (UID: \"411ebcf6-5cec-4604-9a7c-2f3c720296d6\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5"
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5" Jan 27 20:56:10 crc kubenswrapper[4793]: I0127 20:56:10.007811 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5"] Jan 27 20:56:10 crc kubenswrapper[4793]: I0127 20:56:10.916509 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5" event={"ID":"411ebcf6-5cec-4604-9a7c-2f3c720296d6","Type":"ContainerStarted","Data":"c9f93e3b06ea60a446cb0dc6e80246bb657d68eeccc4efcaf8e60e48bd52296e"} Jan 27 20:56:10 crc kubenswrapper[4793]: I0127 20:56:10.917120 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5" event={"ID":"411ebcf6-5cec-4604-9a7c-2f3c720296d6","Type":"ContainerStarted","Data":"dd1cc7543711ecb91da4cb8ba3cd154a7bf9f38451be7cac177be49819af74c4"} Jan 27 20:56:10 crc kubenswrapper[4793]: I0127 20:56:10.940824 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5" podStartSLOduration=2.447425916 podStartE2EDuration="2.940804582s" podCreationTimestamp="2026-01-27 20:56:08 +0000 UTC" firstStartedPulling="2026-01-27 20:56:10.002604683 +0000 UTC m=+3195.392857839" lastFinishedPulling="2026-01-27 20:56:10.495983349 +0000 UTC m=+3195.886236505" observedRunningTime="2026-01-27 20:56:10.933643717 +0000 UTC m=+3196.323896883" watchObservedRunningTime="2026-01-27 20:56:10.940804582 +0000 UTC m=+3196.331057738" Jan 27 20:56:14 crc kubenswrapper[4793]: I0127 20:56:14.803791 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 20:56:14 crc kubenswrapper[4793]: E0127 20:56:14.804581 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:56:17 crc kubenswrapper[4793]: I0127 20:56:17.804300 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:56:17 crc kubenswrapper[4793]: E0127 20:56:17.804918 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:56:23 crc kubenswrapper[4793]: I0127 20:56:23.259659 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4snkt"] Jan 27 20:56:23 crc kubenswrapper[4793]: I0127 20:56:23.262932 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:23 crc kubenswrapper[4793]: I0127 20:56:23.283462 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4snkt"] Jan 27 20:56:23 crc kubenswrapper[4793]: I0127 20:56:23.391823 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c991f8ae-8fb8-480b-b14a-816067c1c314-catalog-content\") pod \"certified-operators-4snkt\" (UID: \"c991f8ae-8fb8-480b-b14a-816067c1c314\") " pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:23 crc kubenswrapper[4793]: I0127 20:56:23.391894 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9n89r\" (UniqueName: \"kubernetes.io/projected/c991f8ae-8fb8-480b-b14a-816067c1c314-kube-api-access-9n89r\") pod \"certified-operators-4snkt\" (UID: \"c991f8ae-8fb8-480b-b14a-816067c1c314\") " pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:23 crc kubenswrapper[4793]: I0127 20:56:23.391929 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c991f8ae-8fb8-480b-b14a-816067c1c314-utilities\") pod \"certified-operators-4snkt\" (UID: \"c991f8ae-8fb8-480b-b14a-816067c1c314\") " pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:23 crc kubenswrapper[4793]: I0127 20:56:23.494521 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c991f8ae-8fb8-480b-b14a-816067c1c314-catalog-content\") pod \"certified-operators-4snkt\" (UID: \"c991f8ae-8fb8-480b-b14a-816067c1c314\") " pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:23 crc kubenswrapper[4793]: I0127 20:56:23.494907 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9n89r\" (UniqueName: \"kubernetes.io/projected/c991f8ae-8fb8-480b-b14a-816067c1c314-kube-api-access-9n89r\") pod \"certified-operators-4snkt\" (UID: \"c991f8ae-8fb8-480b-b14a-816067c1c314\") " pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:23 crc kubenswrapper[4793]: I0127 20:56:23.495025 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c991f8ae-8fb8-480b-b14a-816067c1c314-utilities\") pod \"certified-operators-4snkt\" (UID: \"c991f8ae-8fb8-480b-b14a-816067c1c314\") " pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:23 crc kubenswrapper[4793]: I0127 20:56:23.495491 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c991f8ae-8fb8-480b-b14a-816067c1c314-utilities\") pod \"certified-operators-4snkt\" (UID: \"c991f8ae-8fb8-480b-b14a-816067c1c314\") " pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:23 crc kubenswrapper[4793]: I0127 20:56:23.496432 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c991f8ae-8fb8-480b-b14a-816067c1c314-catalog-content\") pod \"certified-operators-4snkt\" (UID: \"c991f8ae-8fb8-480b-b14a-816067c1c314\") " pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:23 crc kubenswrapper[4793]: I0127 20:56:23.522770 4793 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9n89r\" (UniqueName: \"kubernetes.io/projected/c991f8ae-8fb8-480b-b14a-816067c1c314-kube-api-access-9n89r\") pod \"certified-operators-4snkt\" (UID: \"c991f8ae-8fb8-480b-b14a-816067c1c314\") " pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:23 crc kubenswrapper[4793]: I0127 20:56:23.584635 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:24 crc kubenswrapper[4793]: I0127 20:56:24.186658 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4snkt"] Jan 27 20:56:25 crc kubenswrapper[4793]: I0127 20:56:25.139397 4793 generic.go:334] "Generic (PLEG): container finished" podID="c991f8ae-8fb8-480b-b14a-816067c1c314" containerID="6d9df31cedf77986b1d2ce2081541aca8d842a61777573568a7ce5648676f5ca" exitCode=0 Jan 27 20:56:25 crc kubenswrapper[4793]: I0127 20:56:25.139563 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4snkt" event={"ID":"c991f8ae-8fb8-480b-b14a-816067c1c314","Type":"ContainerDied","Data":"6d9df31cedf77986b1d2ce2081541aca8d842a61777573568a7ce5648676f5ca"} Jan 27 20:56:25 crc kubenswrapper[4793]: I0127 20:56:25.139752 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4snkt" event={"ID":"c991f8ae-8fb8-480b-b14a-816067c1c314","Type":"ContainerStarted","Data":"a05b91096dfefa9076963d05a3fc68a5121901beabcb04156e8c0be948c23b8f"} Jan 27 20:56:25 crc kubenswrapper[4793]: I0127 20:56:25.811236 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 20:56:25 crc kubenswrapper[4793]: E0127 20:56:25.812034 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:56:26 crc kubenswrapper[4793]: I0127 20:56:26.154071 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4snkt" event={"ID":"c991f8ae-8fb8-480b-b14a-816067c1c314","Type":"ContainerStarted","Data":"a8ac756d91bf9702b2f0b43964781476bf082805c98bf89637e9190fc71e0403"} Jan 27 20:56:27 crc kubenswrapper[4793]: I0127 20:56:27.320130 4793 generic.go:334] "Generic (PLEG): container finished" podID="c991f8ae-8fb8-480b-b14a-816067c1c314" containerID="a8ac756d91bf9702b2f0b43964781476bf082805c98bf89637e9190fc71e0403" exitCode=0 Jan 27 20:56:27 crc kubenswrapper[4793]: I0127 20:56:27.320701 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4snkt" event={"ID":"c991f8ae-8fb8-480b-b14a-816067c1c314","Type":"ContainerDied","Data":"a8ac756d91bf9702b2f0b43964781476bf082805c98bf89637e9190fc71e0403"} Jan 27 20:56:28 crc kubenswrapper[4793]: I0127 20:56:28.331432 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4snkt" event={"ID":"c991f8ae-8fb8-480b-b14a-816067c1c314","Type":"ContainerStarted","Data":"2f1d7ceab56dcc9e66cd7bff836409812b559efe2b47c90f94fbb9cd1f1de7ee"} Jan 27 20:56:28 crc kubenswrapper[4793]: I0127 20:56:28.354311 4793 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4snkt" podStartSLOduration=2.773751795 podStartE2EDuration="5.354286385s" podCreationTimestamp="2026-01-27 20:56:23 +0000 UTC" firstStartedPulling="2026-01-27 20:56:25.141466378 +0000 UTC m=+3210.531719534" lastFinishedPulling="2026-01-27 20:56:27.722000968 +0000 UTC m=+3213.112254124" observedRunningTime="2026-01-27 20:56:28.350892251 +0000 UTC m=+3213.741145427" watchObservedRunningTime="2026-01-27 20:56:28.354286385 +0000 UTC m=+3213.744539551" Jan 27 20:56:32 crc kubenswrapper[4793]: I0127 20:56:32.804368 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:56:32 crc kubenswrapper[4793]: E0127 20:56:32.805200 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:56:33 crc kubenswrapper[4793]: I0127 20:56:33.585275 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:33 crc kubenswrapper[4793]: I0127 20:56:33.585640 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:33 crc kubenswrapper[4793]: I0127 20:56:33.634337 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:34 crc kubenswrapper[4793]: I0127 20:56:34.432908 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:34 crc kubenswrapper[4793]: I0127 20:56:34.490786 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4snkt"] Jan 27 20:56:36 crc kubenswrapper[4793]: I0127 20:56:36.402324 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4snkt" podUID="c991f8ae-8fb8-480b-b14a-816067c1c314" containerName="registry-server" containerID="cri-o://2f1d7ceab56dcc9e66cd7bff836409812b559efe2b47c90f94fbb9cd1f1de7ee" gracePeriod=2 Jan 27 20:56:36 crc kubenswrapper[4793]: I0127 20:56:36.576705 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-t22zf"] Jan 27 20:56:36 crc kubenswrapper[4793]: I0127 20:56:36.578863 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:56:36 crc kubenswrapper[4793]: I0127 20:56:36.600888 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t22zf"] Jan 27 20:56:36 crc kubenswrapper[4793]: I0127 20:56:36.707181 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dch4h\" (UniqueName: \"kubernetes.io/projected/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-kube-api-access-dch4h\") pod \"redhat-operators-t22zf\" (UID: \"cd6a3aed-a632-47e2-89b6-ef8b211dd7db\") " pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:56:36 crc kubenswrapper[4793]: I0127 20:56:36.707327 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-catalog-content\") pod \"redhat-operators-t22zf\" (UID: \"cd6a3aed-a632-47e2-89b6-ef8b211dd7db\") " pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:56:36 crc kubenswrapper[4793]: I0127 20:56:36.707362 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-utilities\") pod \"redhat-operators-t22zf\" (UID: \"cd6a3aed-a632-47e2-89b6-ef8b211dd7db\") " pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:56:36 crc kubenswrapper[4793]: I0127 20:56:36.809141 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dch4h\" (UniqueName: \"kubernetes.io/projected/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-kube-api-access-dch4h\") pod \"redhat-operators-t22zf\" (UID: \"cd6a3aed-a632-47e2-89b6-ef8b211dd7db\") " pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:56:36 crc kubenswrapper[4793]: I0127 20:56:36.809280 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-catalog-content\") pod \"redhat-operators-t22zf\" (UID: \"cd6a3aed-a632-47e2-89b6-ef8b211dd7db\") " pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:56:36 crc kubenswrapper[4793]: I0127 20:56:36.809325 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-utilities\") pod \"redhat-operators-t22zf\" (UID: \"cd6a3aed-a632-47e2-89b6-ef8b211dd7db\") " pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:56:36 crc kubenswrapper[4793]: I0127 20:56:36.809765 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-utilities\") pod \"redhat-operators-t22zf\" (UID: \"cd6a3aed-a632-47e2-89b6-ef8b211dd7db\") " pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:56:36 crc kubenswrapper[4793]: I0127 20:56:36.810015 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-catalog-content\") pod \"redhat-operators-t22zf\" (UID: \"cd6a3aed-a632-47e2-89b6-ef8b211dd7db\") " pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:56:36 crc kubenswrapper[4793]: I0127 20:56:36.830619 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
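The SyncLoop ADD/UPDATE/DELETE entries in this section are the kubelet consuming pod events from the API server. The same stream can be observed from outside with a client-go watch; a minimal sketch, assuming a kubeconfig exists at the default location:

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load the kubeconfig from its conventional home path (assumption).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Watch the namespace the catalog pods in this log live in.
	w, err := cs.CoreV1().Pods("openshift-marketplace").Watch(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	defer w.Stop()
	// ADDED/MODIFIED/DELETED here correspond to the kubelet's SyncLoop ADD/UPDATE/DELETE.
	for ev := range w.ResultChan() {
		if pod, ok := ev.Object.(*corev1.Pod); ok {
			fmt.Printf("%s\t%s/%s\n", ev.Type, pod.Namespace, pod.Name)
		}
	}
}
```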
\"kube-api-access-dch4h\" (UniqueName: \"kubernetes.io/projected/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-kube-api-access-dch4h\") pod \"redhat-operators-t22zf\" (UID: \"cd6a3aed-a632-47e2-89b6-ef8b211dd7db\") " pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:56:36 crc kubenswrapper[4793]: I0127 20:56:36.900132 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:56:37 crc kubenswrapper[4793]: I0127 20:56:37.414123 4793 generic.go:334] "Generic (PLEG): container finished" podID="c991f8ae-8fb8-480b-b14a-816067c1c314" containerID="2f1d7ceab56dcc9e66cd7bff836409812b559efe2b47c90f94fbb9cd1f1de7ee" exitCode=0 Jan 27 20:56:37 crc kubenswrapper[4793]: I0127 20:56:37.414231 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4snkt" event={"ID":"c991f8ae-8fb8-480b-b14a-816067c1c314","Type":"ContainerDied","Data":"2f1d7ceab56dcc9e66cd7bff836409812b559efe2b47c90f94fbb9cd1f1de7ee"} Jan 27 20:56:37 crc kubenswrapper[4793]: I0127 20:56:37.570980 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t22zf"] Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.037277 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.303413 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c991f8ae-8fb8-480b-b14a-816067c1c314-utilities\") pod \"c991f8ae-8fb8-480b-b14a-816067c1c314\" (UID: \"c991f8ae-8fb8-480b-b14a-816067c1c314\") " Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.304356 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9n89r\" (UniqueName: \"kubernetes.io/projected/c991f8ae-8fb8-480b-b14a-816067c1c314-kube-api-access-9n89r\") pod \"c991f8ae-8fb8-480b-b14a-816067c1c314\" (UID: \"c991f8ae-8fb8-480b-b14a-816067c1c314\") " Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.304692 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c991f8ae-8fb8-480b-b14a-816067c1c314-catalog-content\") pod \"c991f8ae-8fb8-480b-b14a-816067c1c314\" (UID: \"c991f8ae-8fb8-480b-b14a-816067c1c314\") " Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.305449 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c991f8ae-8fb8-480b-b14a-816067c1c314-utilities" (OuterVolumeSpecName: "utilities") pod "c991f8ae-8fb8-480b-b14a-816067c1c314" (UID: "c991f8ae-8fb8-480b-b14a-816067c1c314"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.306719 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c991f8ae-8fb8-480b-b14a-816067c1c314-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.321115 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c991f8ae-8fb8-480b-b14a-816067c1c314-kube-api-access-9n89r" (OuterVolumeSpecName: "kube-api-access-9n89r") pod "c991f8ae-8fb8-480b-b14a-816067c1c314" (UID: "c991f8ae-8fb8-480b-b14a-816067c1c314"). InnerVolumeSpecName "kube-api-access-9n89r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.359648 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c991f8ae-8fb8-480b-b14a-816067c1c314-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c991f8ae-8fb8-480b-b14a-816067c1c314" (UID: "c991f8ae-8fb8-480b-b14a-816067c1c314"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.410544 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c991f8ae-8fb8-480b-b14a-816067c1c314-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.410613 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9n89r\" (UniqueName: \"kubernetes.io/projected/c991f8ae-8fb8-480b-b14a-816067c1c314-kube-api-access-9n89r\") on node \"crc\" DevicePath \"\"" Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.425376 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4snkt" event={"ID":"c991f8ae-8fb8-480b-b14a-816067c1c314","Type":"ContainerDied","Data":"a05b91096dfefa9076963d05a3fc68a5121901beabcb04156e8c0be948c23b8f"} Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.425473 4793 scope.go:117] "RemoveContainer" containerID="2f1d7ceab56dcc9e66cd7bff836409812b559efe2b47c90f94fbb9cd1f1de7ee" Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.425803 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4snkt" Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.431259 4793 generic.go:334] "Generic (PLEG): container finished" podID="cd6a3aed-a632-47e2-89b6-ef8b211dd7db" containerID="4c7d6226967044b234ea4502c1124ddf29d7a8ca7c2e8cae7815e252207b9693" exitCode=0 Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.431325 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t22zf" event={"ID":"cd6a3aed-a632-47e2-89b6-ef8b211dd7db","Type":"ContainerDied","Data":"4c7d6226967044b234ea4502c1124ddf29d7a8ca7c2e8cae7815e252207b9693"} Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.431355 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t22zf" event={"ID":"cd6a3aed-a632-47e2-89b6-ef8b211dd7db","Type":"ContainerStarted","Data":"53d7509afe1370c556b340d0c46657e0ceb9aeeb18b3334bedd6979d56a6f8cf"} Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.465911 4793 scope.go:117] "RemoveContainer" containerID="a8ac756d91bf9702b2f0b43964781476bf082805c98bf89637e9190fc71e0403" Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.503888 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4snkt"] Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.515381 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4snkt"] Jan 27 20:56:38 crc kubenswrapper[4793]: I0127 20:56:38.523858 4793 scope.go:117] "RemoveContainer" containerID="6d9df31cedf77986b1d2ce2081541aca8d842a61777573568a7ce5648676f5ca" Jan 27 20:56:39 crc kubenswrapper[4793]: I0127 20:56:39.442162 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t22zf" 
event={"ID":"cd6a3aed-a632-47e2-89b6-ef8b211dd7db","Type":"ContainerStarted","Data":"3e257f7566878be148ea83160eb39ae4a1e79b578a67139119869991818f227e"} Jan 27 20:56:39 crc kubenswrapper[4793]: I0127 20:56:39.815748 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c991f8ae-8fb8-480b-b14a-816067c1c314" path="/var/lib/kubelet/pods/c991f8ae-8fb8-480b-b14a-816067c1c314/volumes" Jan 27 20:56:40 crc kubenswrapper[4793]: I0127 20:56:40.802811 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 20:56:40 crc kubenswrapper[4793]: E0127 20:56:40.803180 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:56:42 crc kubenswrapper[4793]: I0127 20:56:42.474126 4793 generic.go:334] "Generic (PLEG): container finished" podID="cd6a3aed-a632-47e2-89b6-ef8b211dd7db" containerID="3e257f7566878be148ea83160eb39ae4a1e79b578a67139119869991818f227e" exitCode=0 Jan 27 20:56:42 crc kubenswrapper[4793]: I0127 20:56:42.474287 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t22zf" event={"ID":"cd6a3aed-a632-47e2-89b6-ef8b211dd7db","Type":"ContainerDied","Data":"3e257f7566878be148ea83160eb39ae4a1e79b578a67139119869991818f227e"} Jan 27 20:56:43 crc kubenswrapper[4793]: I0127 20:56:43.485174 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t22zf" event={"ID":"cd6a3aed-a632-47e2-89b6-ef8b211dd7db","Type":"ContainerStarted","Data":"4061beefea1d79e8e90d2777087dce62424fa4af7051fc0e599d571551177647"} Jan 27 20:56:43 crc kubenswrapper[4793]: I0127 20:56:43.509111 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-t22zf" podStartSLOduration=3.071791704 podStartE2EDuration="7.509087302s" podCreationTimestamp="2026-01-27 20:56:36 +0000 UTC" firstStartedPulling="2026-01-27 20:56:38.436194356 +0000 UTC m=+3223.826447512" lastFinishedPulling="2026-01-27 20:56:42.873489934 +0000 UTC m=+3228.263743110" observedRunningTime="2026-01-27 20:56:43.505989155 +0000 UTC m=+3228.896242331" watchObservedRunningTime="2026-01-27 20:56:43.509087302 +0000 UTC m=+3228.899340458" Jan 27 20:56:46 crc kubenswrapper[4793]: I0127 20:56:46.804523 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:56:46 crc kubenswrapper[4793]: E0127 20:56:46.805110 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:56:46 crc kubenswrapper[4793]: I0127 20:56:46.900949 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:56:46 crc kubenswrapper[4793]: I0127 20:56:46.901332 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:56:47 crc kubenswrapper[4793]: I0127 20:56:47.967521 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-t22zf" podUID="cd6a3aed-a632-47e2-89b6-ef8b211dd7db" containerName="registry-server" probeResult="failure" output=< Jan 27 20:56:47 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s Jan 27 20:56:47 crc kubenswrapper[4793]: > Jan 27 20:56:54 crc kubenswrapper[4793]: I0127 20:56:54.803642 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 20:56:54 crc kubenswrapper[4793]: E0127 20:56:54.804524 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:56:56 crc kubenswrapper[4793]: I0127 20:56:56.951658 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:56:57 crc kubenswrapper[4793]: I0127 20:56:57.006941 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.092109 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t22zf"] Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.093911 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-t22zf" podUID="cd6a3aed-a632-47e2-89b6-ef8b211dd7db" containerName="registry-server" containerID="cri-o://4061beefea1d79e8e90d2777087dce62424fa4af7051fc0e599d571551177647" gracePeriod=2 Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.560963 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.662521 4793 generic.go:334] "Generic (PLEG): container finished" podID="cd6a3aed-a632-47e2-89b6-ef8b211dd7db" containerID="4061beefea1d79e8e90d2777087dce62424fa4af7051fc0e599d571551177647" exitCode=0 Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.662583 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t22zf" event={"ID":"cd6a3aed-a632-47e2-89b6-ef8b211dd7db","Type":"ContainerDied","Data":"4061beefea1d79e8e90d2777087dce62424fa4af7051fc0e599d571551177647"} Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.662625 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t22zf" event={"ID":"cd6a3aed-a632-47e2-89b6-ef8b211dd7db","Type":"ContainerDied","Data":"53d7509afe1370c556b340d0c46657e0ceb9aeeb18b3334bedd6979d56a6f8cf"} Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.662643 4793 scope.go:117] "RemoveContainer" containerID="4061beefea1d79e8e90d2777087dce62424fa4af7051fc0e599d571551177647" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.662855 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-t22zf" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.683721 4793 scope.go:117] "RemoveContainer" containerID="3e257f7566878be148ea83160eb39ae4a1e79b578a67139119869991818f227e" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.694016 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dch4h\" (UniqueName: \"kubernetes.io/projected/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-kube-api-access-dch4h\") pod \"cd6a3aed-a632-47e2-89b6-ef8b211dd7db\" (UID: \"cd6a3aed-a632-47e2-89b6-ef8b211dd7db\") " Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.694236 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-catalog-content\") pod \"cd6a3aed-a632-47e2-89b6-ef8b211dd7db\" (UID: \"cd6a3aed-a632-47e2-89b6-ef8b211dd7db\") " Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.694459 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-utilities\") pod \"cd6a3aed-a632-47e2-89b6-ef8b211dd7db\" (UID: \"cd6a3aed-a632-47e2-89b6-ef8b211dd7db\") " Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.695368 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-utilities" (OuterVolumeSpecName: "utilities") pod "cd6a3aed-a632-47e2-89b6-ef8b211dd7db" (UID: "cd6a3aed-a632-47e2-89b6-ef8b211dd7db"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.700675 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-kube-api-access-dch4h" (OuterVolumeSpecName: "kube-api-access-dch4h") pod "cd6a3aed-a632-47e2-89b6-ef8b211dd7db" (UID: "cd6a3aed-a632-47e2-89b6-ef8b211dd7db"). InnerVolumeSpecName "kube-api-access-dch4h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.714971 4793 scope.go:117] "RemoveContainer" containerID="4c7d6226967044b234ea4502c1124ddf29d7a8ca7c2e8cae7815e252207b9693" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.789207 4793 scope.go:117] "RemoveContainer" containerID="4061beefea1d79e8e90d2777087dce62424fa4af7051fc0e599d571551177647" Jan 27 20:57:00 crc kubenswrapper[4793]: E0127 20:57:00.789516 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4061beefea1d79e8e90d2777087dce62424fa4af7051fc0e599d571551177647\": container with ID starting with 4061beefea1d79e8e90d2777087dce62424fa4af7051fc0e599d571551177647 not found: ID does not exist" containerID="4061beefea1d79e8e90d2777087dce62424fa4af7051fc0e599d571551177647" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.789723 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4061beefea1d79e8e90d2777087dce62424fa4af7051fc0e599d571551177647"} err="failed to get container status \"4061beefea1d79e8e90d2777087dce62424fa4af7051fc0e599d571551177647\": rpc error: code = NotFound desc = could not find container \"4061beefea1d79e8e90d2777087dce62424fa4af7051fc0e599d571551177647\": container with ID starting with 4061beefea1d79e8e90d2777087dce62424fa4af7051fc0e599d571551177647 not found: ID does not exist" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.789749 4793 scope.go:117] "RemoveContainer" containerID="3e257f7566878be148ea83160eb39ae4a1e79b578a67139119869991818f227e" Jan 27 20:57:00 crc kubenswrapper[4793]: E0127 20:57:00.790030 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e257f7566878be148ea83160eb39ae4a1e79b578a67139119869991818f227e\": container with ID starting with 3e257f7566878be148ea83160eb39ae4a1e79b578a67139119869991818f227e not found: ID does not exist" containerID="3e257f7566878be148ea83160eb39ae4a1e79b578a67139119869991818f227e" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.790055 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e257f7566878be148ea83160eb39ae4a1e79b578a67139119869991818f227e"} err="failed to get container status \"3e257f7566878be148ea83160eb39ae4a1e79b578a67139119869991818f227e\": rpc error: code = NotFound desc = could not find container \"3e257f7566878be148ea83160eb39ae4a1e79b578a67139119869991818f227e\": container with ID starting with 3e257f7566878be148ea83160eb39ae4a1e79b578a67139119869991818f227e not found: ID does not exist" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.790074 4793 scope.go:117] "RemoveContainer" containerID="4c7d6226967044b234ea4502c1124ddf29d7a8ca7c2e8cae7815e252207b9693" Jan 27 20:57:00 crc kubenswrapper[4793]: E0127 20:57:00.790623 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c7d6226967044b234ea4502c1124ddf29d7a8ca7c2e8cae7815e252207b9693\": container with ID starting with 4c7d6226967044b234ea4502c1124ddf29d7a8ca7c2e8cae7815e252207b9693 not found: ID does not exist" containerID="4c7d6226967044b234ea4502c1124ddf29d7a8ca7c2e8cae7815e252207b9693" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.790663 4793 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"4c7d6226967044b234ea4502c1124ddf29d7a8ca7c2e8cae7815e252207b9693"} err="failed to get container status \"4c7d6226967044b234ea4502c1124ddf29d7a8ca7c2e8cae7815e252207b9693\": rpc error: code = NotFound desc = could not find container \"4c7d6226967044b234ea4502c1124ddf29d7a8ca7c2e8cae7815e252207b9693\": container with ID starting with 4c7d6226967044b234ea4502c1124ddf29d7a8ca7c2e8cae7815e252207b9693 not found: ID does not exist" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.796529 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.796574 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dch4h\" (UniqueName: \"kubernetes.io/projected/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-kube-api-access-dch4h\") on node \"crc\" DevicePath \"\"" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.836531 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cd6a3aed-a632-47e2-89b6-ef8b211dd7db" (UID: "cd6a3aed-a632-47e2-89b6-ef8b211dd7db"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.898767 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd6a3aed-a632-47e2-89b6-ef8b211dd7db-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 20:57:00 crc kubenswrapper[4793]: I0127 20:57:00.997218 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t22zf"] Jan 27 20:57:01 crc kubenswrapper[4793]: I0127 20:57:01.005419 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-t22zf"] Jan 27 20:57:01 crc kubenswrapper[4793]: I0127 20:57:01.803578 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:57:01 crc kubenswrapper[4793]: E0127 20:57:01.803920 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:57:01 crc kubenswrapper[4793]: I0127 20:57:01.814756 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd6a3aed-a632-47e2-89b6-ef8b211dd7db" path="/var/lib/kubelet/pods/cd6a3aed-a632-47e2-89b6-ef8b211dd7db/volumes" Jan 27 20:57:09 crc kubenswrapper[4793]: I0127 20:57:09.803407 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 20:57:09 crc kubenswrapper[4793]: E0127 20:57:09.804017 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" 
podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:57:15 crc kubenswrapper[4793]: I0127 20:57:15.813709 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:57:15 crc kubenswrapper[4793]: E0127 20:57:15.814363 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:57:24 crc kubenswrapper[4793]: I0127 20:57:24.804229 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 20:57:24 crc kubenswrapper[4793]: E0127 20:57:24.805032 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:57:27 crc kubenswrapper[4793]: I0127 20:57:27.934354 4793 generic.go:334] "Generic (PLEG): container finished" podID="411ebcf6-5cec-4604-9a7c-2f3c720296d6" containerID="c9f93e3b06ea60a446cb0dc6e80246bb657d68eeccc4efcaf8e60e48bd52296e" exitCode=0 Jan 27 20:57:27 crc kubenswrapper[4793]: I0127 20:57:27.934415 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5" event={"ID":"411ebcf6-5cec-4604-9a7c-2f3c720296d6","Type":"ContainerDied","Data":"c9f93e3b06ea60a446cb0dc6e80246bb657d68eeccc4efcaf8e60e48bd52296e"} Jan 27 20:57:29 crc kubenswrapper[4793]: I0127 20:57:29.366332 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5" Jan 27 20:57:29 crc kubenswrapper[4793]: I0127 20:57:29.509134 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/411ebcf6-5cec-4604-9a7c-2f3c720296d6-inventory\") pod \"411ebcf6-5cec-4604-9a7c-2f3c720296d6\" (UID: \"411ebcf6-5cec-4604-9a7c-2f3c720296d6\") " Jan 27 20:57:29 crc kubenswrapper[4793]: I0127 20:57:29.509631 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/411ebcf6-5cec-4604-9a7c-2f3c720296d6-ssh-key-openstack-edpm-ipam\") pod \"411ebcf6-5cec-4604-9a7c-2f3c720296d6\" (UID: \"411ebcf6-5cec-4604-9a7c-2f3c720296d6\") " Jan 27 20:57:29 crc kubenswrapper[4793]: I0127 20:57:29.509669 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kmnw\" (UniqueName: \"kubernetes.io/projected/411ebcf6-5cec-4604-9a7c-2f3c720296d6-kube-api-access-2kmnw\") pod \"411ebcf6-5cec-4604-9a7c-2f3c720296d6\" (UID: \"411ebcf6-5cec-4604-9a7c-2f3c720296d6\") " Jan 27 20:57:29 crc kubenswrapper[4793]: I0127 20:57:29.516516 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/411ebcf6-5cec-4604-9a7c-2f3c720296d6-kube-api-access-2kmnw" (OuterVolumeSpecName: "kube-api-access-2kmnw") pod "411ebcf6-5cec-4604-9a7c-2f3c720296d6" (UID: "411ebcf6-5cec-4604-9a7c-2f3c720296d6"). InnerVolumeSpecName "kube-api-access-2kmnw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:57:29 crc kubenswrapper[4793]: I0127 20:57:29.545930 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/411ebcf6-5cec-4604-9a7c-2f3c720296d6-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "411ebcf6-5cec-4604-9a7c-2f3c720296d6" (UID: "411ebcf6-5cec-4604-9a7c-2f3c720296d6"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:57:29 crc kubenswrapper[4793]: I0127 20:57:29.551706 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/411ebcf6-5cec-4604-9a7c-2f3c720296d6-inventory" (OuterVolumeSpecName: "inventory") pod "411ebcf6-5cec-4604-9a7c-2f3c720296d6" (UID: "411ebcf6-5cec-4604-9a7c-2f3c720296d6"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:57:29 crc kubenswrapper[4793]: I0127 20:57:29.612346 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/411ebcf6-5cec-4604-9a7c-2f3c720296d6-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 20:57:29 crc kubenswrapper[4793]: I0127 20:57:29.612399 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2kmnw\" (UniqueName: \"kubernetes.io/projected/411ebcf6-5cec-4604-9a7c-2f3c720296d6-kube-api-access-2kmnw\") on node \"crc\" DevicePath \"\"" Jan 27 20:57:29 crc kubenswrapper[4793]: I0127 20:57:29.612415 4793 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/411ebcf6-5cec-4604-9a7c-2f3c720296d6-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 20:57:29 crc kubenswrapper[4793]: I0127 20:57:29.803444 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:57:29 crc kubenswrapper[4793]: E0127 20:57:29.803857 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:57:29 crc kubenswrapper[4793]: I0127 20:57:29.963313 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5" event={"ID":"411ebcf6-5cec-4604-9a7c-2f3c720296d6","Type":"ContainerDied","Data":"dd1cc7543711ecb91da4cb8ba3cd154a7bf9f38451be7cac177be49819af74c4"} Jan 27 20:57:29 crc kubenswrapper[4793]: I0127 20:57:29.963721 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd1cc7543711ecb91da4cb8ba3cd154a7bf9f38451be7cac177be49819af74c4" Jan 27 20:57:29 crc kubenswrapper[4793]: I0127 20:57:29.963425 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.046137 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms"] Jan 27 20:57:30 crc kubenswrapper[4793]: E0127 20:57:30.046562 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="411ebcf6-5cec-4604-9a7c-2f3c720296d6" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.046577 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="411ebcf6-5cec-4604-9a7c-2f3c720296d6" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 27 20:57:30 crc kubenswrapper[4793]: E0127 20:57:30.046595 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c991f8ae-8fb8-480b-b14a-816067c1c314" containerName="registry-server" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.046604 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="c991f8ae-8fb8-480b-b14a-816067c1c314" containerName="registry-server" Jan 27 20:57:30 crc kubenswrapper[4793]: E0127 20:57:30.046612 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd6a3aed-a632-47e2-89b6-ef8b211dd7db" containerName="extract-utilities" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.046618 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd6a3aed-a632-47e2-89b6-ef8b211dd7db" containerName="extract-utilities" Jan 27 20:57:30 crc kubenswrapper[4793]: E0127 20:57:30.046637 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd6a3aed-a632-47e2-89b6-ef8b211dd7db" containerName="registry-server" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.046644 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd6a3aed-a632-47e2-89b6-ef8b211dd7db" containerName="registry-server" Jan 27 20:57:30 crc kubenswrapper[4793]: E0127 20:57:30.046654 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c991f8ae-8fb8-480b-b14a-816067c1c314" containerName="extract-content" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.046662 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="c991f8ae-8fb8-480b-b14a-816067c1c314" containerName="extract-content" Jan 27 20:57:30 crc kubenswrapper[4793]: E0127 20:57:30.046689 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c991f8ae-8fb8-480b-b14a-816067c1c314" containerName="extract-utilities" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.046697 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="c991f8ae-8fb8-480b-b14a-816067c1c314" containerName="extract-utilities" Jan 27 20:57:30 crc kubenswrapper[4793]: E0127 20:57:30.046704 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd6a3aed-a632-47e2-89b6-ef8b211dd7db" containerName="extract-content" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.046709 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd6a3aed-a632-47e2-89b6-ef8b211dd7db" containerName="extract-content" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.046901 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="411ebcf6-5cec-4604-9a7c-2f3c720296d6" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.046914 4793 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="cd6a3aed-a632-47e2-89b6-ef8b211dd7db" containerName="registry-server" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.046926 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="c991f8ae-8fb8-480b-b14a-816067c1c314" containerName="registry-server" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.047725 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.050287 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.050620 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.051729 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.052976 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.060226 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms"] Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.224866 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5k4jx\" (UniqueName: \"kubernetes.io/projected/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-kube-api-access-5k4jx\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-682ms\" (UID: \"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.224969 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-682ms\" (UID: \"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.225053 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-682ms\" (UID: \"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.326942 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5k4jx\" (UniqueName: \"kubernetes.io/projected/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-kube-api-access-5k4jx\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-682ms\" (UID: \"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.327023 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-682ms\" (UID: \"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.327095 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-682ms\" (UID: \"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.340024 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-682ms\" (UID: \"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.340284 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-682ms\" (UID: \"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.344598 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5k4jx\" (UniqueName: \"kubernetes.io/projected/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-kube-api-access-5k4jx\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-682ms\" (UID: \"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.370727 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.901133 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms"] Jan 27 20:57:30 crc kubenswrapper[4793]: W0127 20:57:30.911793 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod68e3cfe1_7fc5_4cf0_89e9_fcd526e8fa6f.slice/crio-07274afd55484bc1fce63df8f69f095c8d6426247b167e9418c62682a7cc571d WatchSource:0}: Error finding container 07274afd55484bc1fce63df8f69f095c8d6426247b167e9418c62682a7cc571d: Status 404 returned error can't find the container with id 07274afd55484bc1fce63df8f69f095c8d6426247b167e9418c62682a7cc571d Jan 27 20:57:30 crc kubenswrapper[4793]: I0127 20:57:30.973131 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" event={"ID":"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f","Type":"ContainerStarted","Data":"07274afd55484bc1fce63df8f69f095c8d6426247b167e9418c62682a7cc571d"} Jan 27 20:57:31 crc kubenswrapper[4793]: I0127 20:57:31.984077 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" event={"ID":"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f","Type":"ContainerStarted","Data":"4ae912c273a074eecfab07bce2345169b58d688449b052d46b70085e8b00e28d"} Jan 27 20:57:32 crc kubenswrapper[4793]: I0127 20:57:32.012950 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" podStartSLOduration=1.525409685 podStartE2EDuration="2.012932263s" podCreationTimestamp="2026-01-27 20:57:30 +0000 UTC" firstStartedPulling="2026-01-27 20:57:30.915061066 +0000 UTC m=+3276.305314222" lastFinishedPulling="2026-01-27 20:57:31.402583644 +0000 UTC m=+3276.792836800" observedRunningTime="2026-01-27 20:57:32.00710972 +0000 UTC m=+3277.397362876" watchObservedRunningTime="2026-01-27 20:57:32.012932263 +0000 UTC m=+3277.403185419" Jan 27 20:57:36 crc kubenswrapper[4793]: I0127 20:57:36.803185 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 20:57:36 crc kubenswrapper[4793]: E0127 20:57:36.803937 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:57:37 crc kubenswrapper[4793]: I0127 20:57:37.033985 4793 generic.go:334] "Generic (PLEG): container finished" podID="68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f" containerID="4ae912c273a074eecfab07bce2345169b58d688449b052d46b70085e8b00e28d" exitCode=0 Jan 27 20:57:37 crc kubenswrapper[4793]: I0127 20:57:37.034040 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" event={"ID":"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f","Type":"ContainerDied","Data":"4ae912c273a074eecfab07bce2345169b58d688449b052d46b70085e8b00e28d"} Jan 27 20:57:38 crc kubenswrapper[4793]: I0127 20:57:38.561133 4793 util.go:48] "No ready sandbox for 
pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" Jan 27 20:57:38 crc kubenswrapper[4793]: I0127 20:57:38.734944 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-ssh-key-openstack-edpm-ipam\") pod \"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f\" (UID: \"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f\") " Jan 27 20:57:38 crc kubenswrapper[4793]: I0127 20:57:38.735133 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-inventory\") pod \"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f\" (UID: \"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f\") " Jan 27 20:57:38 crc kubenswrapper[4793]: I0127 20:57:38.735230 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5k4jx\" (UniqueName: \"kubernetes.io/projected/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-kube-api-access-5k4jx\") pod \"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f\" (UID: \"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f\") " Jan 27 20:57:38 crc kubenswrapper[4793]: I0127 20:57:38.742894 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-kube-api-access-5k4jx" (OuterVolumeSpecName: "kube-api-access-5k4jx") pod "68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f" (UID: "68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f"). InnerVolumeSpecName "kube-api-access-5k4jx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:57:38 crc kubenswrapper[4793]: I0127 20:57:38.768074 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f" (UID: "68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:57:38 crc kubenswrapper[4793]: I0127 20:57:38.768466 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-inventory" (OuterVolumeSpecName: "inventory") pod "68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f" (UID: "68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 20:57:38 crc kubenswrapper[4793]: I0127 20:57:38.838370 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 20:57:38 crc kubenswrapper[4793]: I0127 20:57:38.838429 4793 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 20:57:38 crc kubenswrapper[4793]: I0127 20:57:38.838448 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5k4jx\" (UniqueName: \"kubernetes.io/projected/68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f-kube-api-access-5k4jx\") on node \"crc\" DevicePath \"\"" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.055418 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" event={"ID":"68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f","Type":"ContainerDied","Data":"07274afd55484bc1fce63df8f69f095c8d6426247b167e9418c62682a7cc571d"} Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.055831 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="07274afd55484bc1fce63df8f69f095c8d6426247b167e9418c62682a7cc571d" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.055494 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-682ms" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.155441 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65"] Jan 27 20:57:39 crc kubenswrapper[4793]: E0127 20:57:39.162589 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.162626 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.163074 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.164330 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.166347 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.167116 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65"] Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.168667 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.168955 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.169099 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.247459 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xb2cc\" (UniqueName: \"kubernetes.io/projected/ab303649-e7fe-4056-8414-9afda486e099-kube-api-access-xb2cc\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-k4h65\" (UID: \"ab303649-e7fe-4056-8414-9afda486e099\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.247643 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ab303649-e7fe-4056-8414-9afda486e099-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-k4h65\" (UID: \"ab303649-e7fe-4056-8414-9afda486e099\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.248016 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab303649-e7fe-4056-8414-9afda486e099-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-k4h65\" (UID: \"ab303649-e7fe-4056-8414-9afda486e099\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.349645 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xb2cc\" (UniqueName: \"kubernetes.io/projected/ab303649-e7fe-4056-8414-9afda486e099-kube-api-access-xb2cc\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-k4h65\" (UID: \"ab303649-e7fe-4056-8414-9afda486e099\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.349790 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ab303649-e7fe-4056-8414-9afda486e099-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-k4h65\" (UID: \"ab303649-e7fe-4056-8414-9afda486e099\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.349905 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab303649-e7fe-4056-8414-9afda486e099-inventory\") pod 
\"install-os-edpm-deployment-openstack-edpm-ipam-k4h65\" (UID: \"ab303649-e7fe-4056-8414-9afda486e099\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.354597 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab303649-e7fe-4056-8414-9afda486e099-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-k4h65\" (UID: \"ab303649-e7fe-4056-8414-9afda486e099\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.354783 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ab303649-e7fe-4056-8414-9afda486e099-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-k4h65\" (UID: \"ab303649-e7fe-4056-8414-9afda486e099\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.365170 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xb2cc\" (UniqueName: \"kubernetes.io/projected/ab303649-e7fe-4056-8414-9afda486e099-kube-api-access-xb2cc\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-k4h65\" (UID: \"ab303649-e7fe-4056-8414-9afda486e099\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" Jan 27 20:57:39 crc kubenswrapper[4793]: I0127 20:57:39.492093 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" Jan 27 20:57:40 crc kubenswrapper[4793]: I0127 20:57:40.079706 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65"] Jan 27 20:57:41 crc kubenswrapper[4793]: I0127 20:57:41.074078 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" event={"ID":"ab303649-e7fe-4056-8414-9afda486e099","Type":"ContainerStarted","Data":"9619a5e0ce2386a628f53746ea2eec0d8f89b3f590218dcbd83ff6d010f7789a"} Jan 27 20:57:41 crc kubenswrapper[4793]: I0127 20:57:41.074769 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" event={"ID":"ab303649-e7fe-4056-8414-9afda486e099","Type":"ContainerStarted","Data":"0c36e0d8f163241d43c4953f5a36c37bf6ebfcedd32a8216c8ca4e739869d2ea"} Jan 27 20:57:41 crc kubenswrapper[4793]: I0127 20:57:41.097335 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" podStartSLOduration=1.679127262 podStartE2EDuration="2.097314058s" podCreationTimestamp="2026-01-27 20:57:39 +0000 UTC" firstStartedPulling="2026-01-27 20:57:40.085484643 +0000 UTC m=+3285.475737799" lastFinishedPulling="2026-01-27 20:57:40.503671439 +0000 UTC m=+3285.893924595" observedRunningTime="2026-01-27 20:57:41.088366092 +0000 UTC m=+3286.478619258" watchObservedRunningTime="2026-01-27 20:57:41.097314058 +0000 UTC m=+3286.487567204" Jan 27 20:57:43 crc kubenswrapper[4793]: I0127 20:57:43.804301 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:57:43 crc kubenswrapper[4793]: E0127 20:57:43.805242 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:57:48 crc kubenswrapper[4793]: I0127 20:57:48.804164 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 20:57:48 crc kubenswrapper[4793]: E0127 20:57:48.805287 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:57:58 crc kubenswrapper[4793]: I0127 20:57:58.804215 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:57:58 crc kubenswrapper[4793]: E0127 20:57:58.805228 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:58:02 crc kubenswrapper[4793]: I0127 20:58:02.804086 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 20:58:02 crc kubenswrapper[4793]: E0127 20:58:02.804784 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:58:11 crc kubenswrapper[4793]: I0127 20:58:11.803999 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:58:11 crc kubenswrapper[4793]: E0127 20:58:11.805049 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:58:16 crc kubenswrapper[4793]: I0127 20:58:16.803686 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 20:58:16 crc kubenswrapper[4793]: E0127 20:58:16.804504 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 20:58:24 crc kubenswrapper[4793]: I0127 20:58:24.451338 4793 generic.go:334] "Generic 
(PLEG): container finished" podID="ab303649-e7fe-4056-8414-9afda486e099" containerID="9619a5e0ce2386a628f53746ea2eec0d8f89b3f590218dcbd83ff6d010f7789a" exitCode=0 Jan 27 20:58:24 crc kubenswrapper[4793]: I0127 20:58:24.451434 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" event={"ID":"ab303649-e7fe-4056-8414-9afda486e099","Type":"ContainerDied","Data":"9619a5e0ce2386a628f53746ea2eec0d8f89b3f590218dcbd83ff6d010f7789a"} Jan 27 20:58:24 crc kubenswrapper[4793]: I0127 20:58:24.803820 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e" Jan 27 20:58:24 crc kubenswrapper[4793]: E0127 20:58:24.804223 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 20:58:25 crc kubenswrapper[4793]: I0127 20:58:25.955482 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.031161 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ab303649-e7fe-4056-8414-9afda486e099-ssh-key-openstack-edpm-ipam\") pod \"ab303649-e7fe-4056-8414-9afda486e099\" (UID: \"ab303649-e7fe-4056-8414-9afda486e099\") " Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.031522 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab303649-e7fe-4056-8414-9afda486e099-inventory\") pod \"ab303649-e7fe-4056-8414-9afda486e099\" (UID: \"ab303649-e7fe-4056-8414-9afda486e099\") " Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.031692 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xb2cc\" (UniqueName: \"kubernetes.io/projected/ab303649-e7fe-4056-8414-9afda486e099-kube-api-access-xb2cc\") pod \"ab303649-e7fe-4056-8414-9afda486e099\" (UID: \"ab303649-e7fe-4056-8414-9afda486e099\") " Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.037455 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab303649-e7fe-4056-8414-9afda486e099-kube-api-access-xb2cc" (OuterVolumeSpecName: "kube-api-access-xb2cc") pod "ab303649-e7fe-4056-8414-9afda486e099" (UID: "ab303649-e7fe-4056-8414-9afda486e099"). InnerVolumeSpecName "kube-api-access-xb2cc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.060862 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab303649-e7fe-4056-8414-9afda486e099-inventory" (OuterVolumeSpecName: "inventory") pod "ab303649-e7fe-4056-8414-9afda486e099" (UID: "ab303649-e7fe-4056-8414-9afda486e099"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.061377 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab303649-e7fe-4056-8414-9afda486e099-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "ab303649-e7fe-4056-8414-9afda486e099" (UID: "ab303649-e7fe-4056-8414-9afda486e099"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.134262 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ab303649-e7fe-4056-8414-9afda486e099-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.134311 4793 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab303649-e7fe-4056-8414-9afda486e099-inventory\") on node \"crc\" DevicePath \"\""
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.134326 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xb2cc\" (UniqueName: \"kubernetes.io/projected/ab303649-e7fe-4056-8414-9afda486e099-kube-api-access-xb2cc\") on node \"crc\" DevicePath \"\""
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.468973 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65" event={"ID":"ab303649-e7fe-4056-8414-9afda486e099","Type":"ContainerDied","Data":"0c36e0d8f163241d43c4953f5a36c37bf6ebfcedd32a8216c8ca4e739869d2ea"}
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.469028 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c36e0d8f163241d43c4953f5a36c37bf6ebfcedd32a8216c8ca4e739869d2ea"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.469085 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-k4h65"
Jan 27 20:58:26 crc kubenswrapper[4793]: E0127 20:58:26.568480 4793 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podab303649_e7fe_4056_8414_9afda486e099.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podab303649_e7fe_4056_8414_9afda486e099.slice/crio-0c36e0d8f163241d43c4953f5a36c37bf6ebfcedd32a8216c8ca4e739869d2ea\": RecentStats: unable to find data in memory cache]"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.587030 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64"]
Jan 27 20:58:26 crc kubenswrapper[4793]: E0127 20:58:26.587437 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab303649-e7fe-4056-8414-9afda486e099" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.587453 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab303649-e7fe-4056-8414-9afda486e099" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.587861 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab303649-e7fe-4056-8414-9afda486e099" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.588833 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.591238 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.591635 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.591641 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.591674 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.631647 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64"]
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.643074 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93d0a92d-00df-4317-a361-d4d1858b0602-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-l2g64\" (UID: \"93d0a92d-00df-4317-a361-d4d1858b0602\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.643158 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/93d0a92d-00df-4317-a361-d4d1858b0602-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-l2g64\" (UID: \"93d0a92d-00df-4317-a361-d4d1858b0602\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.643302 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rklc\" (UniqueName: \"kubernetes.io/projected/93d0a92d-00df-4317-a361-d4d1858b0602-kube-api-access-8rklc\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-l2g64\" (UID: \"93d0a92d-00df-4317-a361-d4d1858b0602\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.745113 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93d0a92d-00df-4317-a361-d4d1858b0602-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-l2g64\" (UID: \"93d0a92d-00df-4317-a361-d4d1858b0602\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.745488 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/93d0a92d-00df-4317-a361-d4d1858b0602-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-l2g64\" (UID: \"93d0a92d-00df-4317-a361-d4d1858b0602\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.745675 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rklc\" (UniqueName: \"kubernetes.io/projected/93d0a92d-00df-4317-a361-d4d1858b0602-kube-api-access-8rklc\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-l2g64\" (UID: \"93d0a92d-00df-4317-a361-d4d1858b0602\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.749455 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/93d0a92d-00df-4317-a361-d4d1858b0602-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-l2g64\" (UID: \"93d0a92d-00df-4317-a361-d4d1858b0602\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.751174 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93d0a92d-00df-4317-a361-d4d1858b0602-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-l2g64\" (UID: \"93d0a92d-00df-4317-a361-d4d1858b0602\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.766836 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rklc\" (UniqueName: \"kubernetes.io/projected/93d0a92d-00df-4317-a361-d4d1858b0602-kube-api-access-8rklc\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-l2g64\" (UID: \"93d0a92d-00df-4317-a361-d4d1858b0602\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64"
Jan 27 20:58:26 crc kubenswrapper[4793]: I0127 20:58:26.950716 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64"
Jan 27 20:58:27 crc kubenswrapper[4793]: I0127 20:58:27.515302 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64"]
Jan 27 20:58:28 crc kubenswrapper[4793]: I0127 20:58:28.501529 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64" event={"ID":"93d0a92d-00df-4317-a361-d4d1858b0602","Type":"ContainerStarted","Data":"cb0b65971ad7df988c6edf3ae5cd1b92d779a24e77cc66829e4ba3618f1d617f"}
Jan 27 20:58:28 crc kubenswrapper[4793]: I0127 20:58:28.502249 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64" event={"ID":"93d0a92d-00df-4317-a361-d4d1858b0602","Type":"ContainerStarted","Data":"6f6829bef15b4757c815829b0697266885148c7620b993969f7bd2bf1426e3c7"}
Jan 27 20:58:28 crc kubenswrapper[4793]: I0127 20:58:28.525346 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64" podStartSLOduration=2.100987576 podStartE2EDuration="2.525323103s" podCreationTimestamp="2026-01-27 20:58:26 +0000 UTC" firstStartedPulling="2026-01-27 20:58:27.530276389 +0000 UTC m=+3332.920529545" lastFinishedPulling="2026-01-27 20:58:27.954611916 +0000 UTC m=+3333.344865072" observedRunningTime="2026-01-27 20:58:28.515125694 +0000 UTC m=+3333.905378870" watchObservedRunningTime="2026-01-27 20:58:28.525323103 +0000 UTC m=+3333.915576259"
Jan 27 20:58:28 crc kubenswrapper[4793]: I0127 20:58:28.804240 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76"
Jan 27 20:58:28 crc kubenswrapper[4793]: E0127 20:58:28.804514 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:58:39 crc kubenswrapper[4793]: I0127 20:58:39.868358 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e"
Jan 27 20:58:40 crc kubenswrapper[4793]: I0127 20:58:40.621713 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292"}
Jan 27 20:58:41 crc kubenswrapper[4793]: I0127 20:58:41.805034 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76"
Jan 27 20:58:41 crc kubenswrapper[4793]: E0127 20:58:41.805711 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:58:43 crc kubenswrapper[4793]: I0127 20:58:43.243450 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 27 20:58:43 crc kubenswrapper[4793]: I0127 20:58:43.682050 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" exitCode=1
Jan 27 20:58:43 crc kubenswrapper[4793]: I0127 20:58:43.682143 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292"}
Jan 27 20:58:43 crc kubenswrapper[4793]: I0127 20:58:43.682304 4793 scope.go:117] "RemoveContainer" containerID="84851a8140dd090a52083ce274f0fad5ba353fc6237fab7d60d3319e6ebd2e7e"
Jan 27 20:58:43 crc kubenswrapper[4793]: I0127 20:58:43.683185 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292"
Jan 27 20:58:43 crc kubenswrapper[4793]: E0127 20:58:43.683719 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:58:48 crc kubenswrapper[4793]: I0127 20:58:48.242771 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 20:58:48 crc kubenswrapper[4793]: I0127 20:58:48.243265 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 20:58:48 crc kubenswrapper[4793]: I0127 20:58:48.243274 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 20:58:48 crc kubenswrapper[4793]: I0127 20:58:48.243987 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292"
Jan 27 20:58:48 crc kubenswrapper[4793]: E0127 20:58:48.244218 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:58:53 crc kubenswrapper[4793]: I0127 20:58:53.803068 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76"
Jan 27 20:58:53 crc kubenswrapper[4793]: E0127 20:58:53.803805 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:58:58 crc kubenswrapper[4793]: I0127 20:58:58.853448 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292"
Jan 27 20:58:58 crc kubenswrapper[4793]: E0127 20:58:58.854195 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:59:08 crc kubenswrapper[4793]: I0127 20:59:08.803827 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76"
Jan 27 20:59:08 crc kubenswrapper[4793]: E0127 20:59:08.804708 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:59:11 crc kubenswrapper[4793]: I0127 20:59:11.803470 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292"
Jan 27 20:59:11 crc kubenswrapper[4793]: E0127 20:59:11.804262 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:59:20 crc kubenswrapper[4793]: I0127 20:59:20.803168 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76"
Jan 27 20:59:20 crc kubenswrapper[4793]: E0127 20:59:20.804063 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:59:26 crc kubenswrapper[4793]: I0127 20:59:26.856841 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292"
Jan 27 20:59:26 crc kubenswrapper[4793]: E0127 20:59:26.857535 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:59:28 crc kubenswrapper[4793]: I0127 20:59:28.762819 4793 generic.go:334] "Generic (PLEG): container finished" podID="93d0a92d-00df-4317-a361-d4d1858b0602" containerID="cb0b65971ad7df988c6edf3ae5cd1b92d779a24e77cc66829e4ba3618f1d617f" exitCode=0
Jan 27 20:59:28 crc kubenswrapper[4793]: I0127 20:59:28.762916 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64" event={"ID":"93d0a92d-00df-4317-a361-d4d1858b0602","Type":"ContainerDied","Data":"cb0b65971ad7df988c6edf3ae5cd1b92d779a24e77cc66829e4ba3618f1d617f"}
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.295017 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64"
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.423233 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rklc\" (UniqueName: \"kubernetes.io/projected/93d0a92d-00df-4317-a361-d4d1858b0602-kube-api-access-8rklc\") pod \"93d0a92d-00df-4317-a361-d4d1858b0602\" (UID: \"93d0a92d-00df-4317-a361-d4d1858b0602\") "
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.423659 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93d0a92d-00df-4317-a361-d4d1858b0602-inventory\") pod \"93d0a92d-00df-4317-a361-d4d1858b0602\" (UID: \"93d0a92d-00df-4317-a361-d4d1858b0602\") "
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.423800 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/93d0a92d-00df-4317-a361-d4d1858b0602-ssh-key-openstack-edpm-ipam\") pod \"93d0a92d-00df-4317-a361-d4d1858b0602\" (UID: \"93d0a92d-00df-4317-a361-d4d1858b0602\") "
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.431390 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93d0a92d-00df-4317-a361-d4d1858b0602-kube-api-access-8rklc" (OuterVolumeSpecName: "kube-api-access-8rklc") pod "93d0a92d-00df-4317-a361-d4d1858b0602" (UID: "93d0a92d-00df-4317-a361-d4d1858b0602"). InnerVolumeSpecName "kube-api-access-8rklc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.457232 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93d0a92d-00df-4317-a361-d4d1858b0602-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "93d0a92d-00df-4317-a361-d4d1858b0602" (UID: "93d0a92d-00df-4317-a361-d4d1858b0602"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.458287 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93d0a92d-00df-4317-a361-d4d1858b0602-inventory" (OuterVolumeSpecName: "inventory") pod "93d0a92d-00df-4317-a361-d4d1858b0602" (UID: "93d0a92d-00df-4317-a361-d4d1858b0602"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.528495 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rklc\" (UniqueName: \"kubernetes.io/projected/93d0a92d-00df-4317-a361-d4d1858b0602-kube-api-access-8rklc\") on node \"crc\" DevicePath \"\""
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.528553 4793 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93d0a92d-00df-4317-a361-d4d1858b0602-inventory\") on node \"crc\" DevicePath \"\""
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.528600 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/93d0a92d-00df-4317-a361-d4d1858b0602-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.782626 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64"
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.782676 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-l2g64" event={"ID":"93d0a92d-00df-4317-a361-d4d1858b0602","Type":"ContainerDied","Data":"6f6829bef15b4757c815829b0697266885148c7620b993969f7bd2bf1426e3c7"}
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.782717 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f6829bef15b4757c815829b0697266885148c7620b993969f7bd2bf1426e3c7"
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.894088 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-tlxlc"]
Jan 27 20:59:30 crc kubenswrapper[4793]: E0127 20:59:30.894544 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93d0a92d-00df-4317-a361-d4d1858b0602" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.894583 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="93d0a92d-00df-4317-a361-d4d1858b0602" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.894834 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="93d0a92d-00df-4317-a361-d4d1858b0602" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.895531 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc"
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.900518 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.900801 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.904400 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.908986 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd"
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.925374 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-tlxlc"]
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.942016 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/018aa89c-0173-426e-b107-81c9b171c475-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-tlxlc\" (UID: \"018aa89c-0173-426e-b107-81c9b171c475\") " pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc"
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.942106 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6hsz\" (UniqueName: \"kubernetes.io/projected/018aa89c-0173-426e-b107-81c9b171c475-kube-api-access-n6hsz\") pod \"ssh-known-hosts-edpm-deployment-tlxlc\" (UID: \"018aa89c-0173-426e-b107-81c9b171c475\") " pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc"
Jan 27 20:59:30 crc kubenswrapper[4793]: I0127 20:59:30.942173 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/018aa89c-0173-426e-b107-81c9b171c475-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-tlxlc\" (UID: \"018aa89c-0173-426e-b107-81c9b171c475\") " pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc"
Jan 27 20:59:31 crc kubenswrapper[4793]: I0127 20:59:31.043617 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/018aa89c-0173-426e-b107-81c9b171c475-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-tlxlc\" (UID: \"018aa89c-0173-426e-b107-81c9b171c475\") " pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc"
Jan 27 20:59:31 crc kubenswrapper[4793]: I0127 20:59:31.043673 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6hsz\" (UniqueName: \"kubernetes.io/projected/018aa89c-0173-426e-b107-81c9b171c475-kube-api-access-n6hsz\") pod \"ssh-known-hosts-edpm-deployment-tlxlc\" (UID: \"018aa89c-0173-426e-b107-81c9b171c475\") " pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc"
Jan 27 20:59:31 crc kubenswrapper[4793]: I0127 20:59:31.043719 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/018aa89c-0173-426e-b107-81c9b171c475-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-tlxlc\" (UID: \"018aa89c-0173-426e-b107-81c9b171c475\") " pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc"
Jan 27 20:59:31 crc kubenswrapper[4793]: I0127 20:59:31.053028 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/018aa89c-0173-426e-b107-81c9b171c475-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-tlxlc\" (UID: \"018aa89c-0173-426e-b107-81c9b171c475\") " pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc"
Jan 27 20:59:31 crc kubenswrapper[4793]: I0127 20:59:31.053451 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/018aa89c-0173-426e-b107-81c9b171c475-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-tlxlc\" (UID: \"018aa89c-0173-426e-b107-81c9b171c475\") " pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc"
Jan 27 20:59:31 crc kubenswrapper[4793]: I0127 20:59:31.064530 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6hsz\" (UniqueName: \"kubernetes.io/projected/018aa89c-0173-426e-b107-81c9b171c475-kube-api-access-n6hsz\") pod \"ssh-known-hosts-edpm-deployment-tlxlc\" (UID: \"018aa89c-0173-426e-b107-81c9b171c475\") " pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc"
Jan 27 20:59:31 crc kubenswrapper[4793]: I0127 20:59:31.272817 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc"
Jan 27 20:59:31 crc kubenswrapper[4793]: W0127 20:59:31.807994 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod018aa89c_0173_426e_b107_81c9b171c475.slice/crio-ae4f9127ede979158cb0413b608f10af8645a2a4766297eb30f9697e8b46672a WatchSource:0}: Error finding container ae4f9127ede979158cb0413b608f10af8645a2a4766297eb30f9697e8b46672a: Status 404 returned error can't find the container with id ae4f9127ede979158cb0413b608f10af8645a2a4766297eb30f9697e8b46672a
Jan 27 20:59:31 crc kubenswrapper[4793]: I0127 20:59:31.817349 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-tlxlc"]
Jan 27 20:59:32 crc kubenswrapper[4793]: I0127 20:59:32.812546 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc" event={"ID":"018aa89c-0173-426e-b107-81c9b171c475","Type":"ContainerStarted","Data":"34ee8378c7704c583a2f45bf3c31b849d8de5cb7f72174e4156ee9c5eea650e9"}
Jan 27 20:59:32 crc kubenswrapper[4793]: I0127 20:59:32.812949 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc" event={"ID":"018aa89c-0173-426e-b107-81c9b171c475","Type":"ContainerStarted","Data":"ae4f9127ede979158cb0413b608f10af8645a2a4766297eb30f9697e8b46672a"}
Jan 27 20:59:32 crc kubenswrapper[4793]: I0127 20:59:32.837318 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc" podStartSLOduration=2.305841758 podStartE2EDuration="2.837295899s" podCreationTimestamp="2026-01-27 20:59:30 +0000 UTC" firstStartedPulling="2026-01-27 20:59:31.81308506 +0000 UTC m=+3397.203338216" lastFinishedPulling="2026-01-27 20:59:32.344539201 +0000 UTC m=+3397.734792357" observedRunningTime="2026-01-27 20:59:32.832231 +0000 UTC m=+3398.222484156" watchObservedRunningTime="2026-01-27 20:59:32.837295899 +0000 UTC m=+3398.227549055"
Jan 27 20:59:33 crc kubenswrapper[4793]: I0127 20:59:33.803755 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76"
Jan 27 20:59:33 crc kubenswrapper[4793]: E0127 20:59:33.804205 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:59:41 crc kubenswrapper[4793]: I0127 20:59:41.132850 4793 generic.go:334] "Generic (PLEG): container finished" podID="018aa89c-0173-426e-b107-81c9b171c475" containerID="34ee8378c7704c583a2f45bf3c31b849d8de5cb7f72174e4156ee9c5eea650e9" exitCode=0
Jan 27 20:59:41 crc kubenswrapper[4793]: I0127 20:59:41.132967 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc" event={"ID":"018aa89c-0173-426e-b107-81c9b171c475","Type":"ContainerDied","Data":"34ee8378c7704c583a2f45bf3c31b849d8de5cb7f72174e4156ee9c5eea650e9"}
Jan 27 20:59:41 crc kubenswrapper[4793]: I0127 20:59:41.803566 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292"
Jan 27 20:59:41 crc kubenswrapper[4793]: E0127 20:59:41.803864 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:59:42 crc kubenswrapper[4793]: I0127 20:59:42.670362 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc"
Jan 27 20:59:42 crc kubenswrapper[4793]: I0127 20:59:42.868588 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n6hsz\" (UniqueName: \"kubernetes.io/projected/018aa89c-0173-426e-b107-81c9b171c475-kube-api-access-n6hsz\") pod \"018aa89c-0173-426e-b107-81c9b171c475\" (UID: \"018aa89c-0173-426e-b107-81c9b171c475\") "
Jan 27 20:59:42 crc kubenswrapper[4793]: I0127 20:59:42.868784 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/018aa89c-0173-426e-b107-81c9b171c475-inventory-0\") pod \"018aa89c-0173-426e-b107-81c9b171c475\" (UID: \"018aa89c-0173-426e-b107-81c9b171c475\") "
Jan 27 20:59:42 crc kubenswrapper[4793]: I0127 20:59:42.869495 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/018aa89c-0173-426e-b107-81c9b171c475-ssh-key-openstack-edpm-ipam\") pod \"018aa89c-0173-426e-b107-81c9b171c475\" (UID: \"018aa89c-0173-426e-b107-81c9b171c475\") "
Jan 27 20:59:42 crc kubenswrapper[4793]: I0127 20:59:42.881426 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/018aa89c-0173-426e-b107-81c9b171c475-kube-api-access-n6hsz" (OuterVolumeSpecName: "kube-api-access-n6hsz") pod "018aa89c-0173-426e-b107-81c9b171c475" (UID: "018aa89c-0173-426e-b107-81c9b171c475"). InnerVolumeSpecName "kube-api-access-n6hsz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:59:42 crc kubenswrapper[4793]: I0127 20:59:42.902800 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/018aa89c-0173-426e-b107-81c9b171c475-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "018aa89c-0173-426e-b107-81c9b171c475" (UID: "018aa89c-0173-426e-b107-81c9b171c475"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:59:42 crc kubenswrapper[4793]: I0127 20:59:42.916034 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/018aa89c-0173-426e-b107-81c9b171c475-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "018aa89c-0173-426e-b107-81c9b171c475" (UID: "018aa89c-0173-426e-b107-81c9b171c475"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:59:42 crc kubenswrapper[4793]: I0127 20:59:42.972388 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n6hsz\" (UniqueName: \"kubernetes.io/projected/018aa89c-0173-426e-b107-81c9b171c475-kube-api-access-n6hsz\") on node \"crc\" DevicePath \"\""
Jan 27 20:59:42 crc kubenswrapper[4793]: I0127 20:59:42.972422 4793 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/018aa89c-0173-426e-b107-81c9b171c475-inventory-0\") on node \"crc\" DevicePath \"\""
Jan 27 20:59:42 crc kubenswrapper[4793]: I0127 20:59:42.972434 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/018aa89c-0173-426e-b107-81c9b171c475-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.152478 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc" event={"ID":"018aa89c-0173-426e-b107-81c9b171c475","Type":"ContainerDied","Data":"ae4f9127ede979158cb0413b608f10af8645a2a4766297eb30f9697e8b46672a"}
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.152516 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ae4f9127ede979158cb0413b608f10af8645a2a4766297eb30f9697e8b46672a"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.153315 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-tlxlc"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.235281 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl"]
Jan 27 20:59:43 crc kubenswrapper[4793]: E0127 20:59:43.236059 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="018aa89c-0173-426e-b107-81c9b171c475" containerName="ssh-known-hosts-edpm-deployment"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.236083 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="018aa89c-0173-426e-b107-81c9b171c475" containerName="ssh-known-hosts-edpm-deployment"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.236355 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="018aa89c-0173-426e-b107-81c9b171c475" containerName="ssh-known-hosts-edpm-deployment"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.237044 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.242691 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.242762 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.242928 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.243174 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.271620 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl"]
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.277994 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsrlf\" (UniqueName: \"kubernetes.io/projected/86d391ca-f72f-4332-9d2b-568200608a9e-kube-api-access-bsrlf\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dgldl\" (UID: \"86d391ca-f72f-4332-9d2b-568200608a9e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.278055 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/86d391ca-f72f-4332-9d2b-568200608a9e-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dgldl\" (UID: \"86d391ca-f72f-4332-9d2b-568200608a9e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.278142 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86d391ca-f72f-4332-9d2b-568200608a9e-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dgldl\" (UID: \"86d391ca-f72f-4332-9d2b-568200608a9e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.381189 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/86d391ca-f72f-4332-9d2b-568200608a9e-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dgldl\" (UID: \"86d391ca-f72f-4332-9d2b-568200608a9e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.381406 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86d391ca-f72f-4332-9d2b-568200608a9e-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dgldl\" (UID: \"86d391ca-f72f-4332-9d2b-568200608a9e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.381656 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsrlf\" (UniqueName: \"kubernetes.io/projected/86d391ca-f72f-4332-9d2b-568200608a9e-kube-api-access-bsrlf\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dgldl\" (UID: \"86d391ca-f72f-4332-9d2b-568200608a9e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.385069 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/86d391ca-f72f-4332-9d2b-568200608a9e-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dgldl\" (UID: \"86d391ca-f72f-4332-9d2b-568200608a9e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.385530 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86d391ca-f72f-4332-9d2b-568200608a9e-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dgldl\" (UID: \"86d391ca-f72f-4332-9d2b-568200608a9e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.399176 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsrlf\" (UniqueName: \"kubernetes.io/projected/86d391ca-f72f-4332-9d2b-568200608a9e-kube-api-access-bsrlf\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-dgldl\" (UID: \"86d391ca-f72f-4332-9d2b-568200608a9e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl"
Jan 27 20:59:43 crc kubenswrapper[4793]: I0127 20:59:43.570869 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl"
Jan 27 20:59:44 crc kubenswrapper[4793]: I0127 20:59:44.116518 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl"]
Jan 27 20:59:44 crc kubenswrapper[4793]: I0127 20:59:44.161271 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl" event={"ID":"86d391ca-f72f-4332-9d2b-568200608a9e","Type":"ContainerStarted","Data":"376e3bda8ccd1ae97748a693407ad4184498d05e2763bb25afbd99c7c2b891c5"}
Jan 27 20:59:46 crc kubenswrapper[4793]: I0127 20:59:46.192021 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl" event={"ID":"86d391ca-f72f-4332-9d2b-568200608a9e","Type":"ContainerStarted","Data":"56fe1f7cd8dd60310479532b9285921276f7492256bdfb0fe792ca0e0588b026"}
Jan 27 20:59:46 crc kubenswrapper[4793]: I0127 20:59:46.221295 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl" podStartSLOduration=2.022970104 podStartE2EDuration="3.221277032s" podCreationTimestamp="2026-01-27 20:59:43 +0000 UTC" firstStartedPulling="2026-01-27 20:59:44.10607631 +0000 UTC m=+3409.496329466" lastFinishedPulling="2026-01-27 20:59:45.304383208 +0000 UTC m=+3410.694636394" observedRunningTime="2026-01-27 20:59:46.214789557 +0000 UTC m=+3411.605042713" watchObservedRunningTime="2026-01-27 20:59:46.221277032 +0000 UTC m=+3411.611530188"
Jan 27 20:59:46 crc kubenswrapper[4793]: I0127 20:59:46.803461 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76"
Jan 27 20:59:46 crc kubenswrapper[4793]: E0127 20:59:46.803837 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 20:59:54 crc kubenswrapper[4793]: I0127 20:59:54.264080 4793 generic.go:334] "Generic (PLEG): container finished" podID="86d391ca-f72f-4332-9d2b-568200608a9e" containerID="56fe1f7cd8dd60310479532b9285921276f7492256bdfb0fe792ca0e0588b026" exitCode=0
Jan 27 20:59:54 crc kubenswrapper[4793]: I0127 20:59:54.264182 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl" event={"ID":"86d391ca-f72f-4332-9d2b-568200608a9e","Type":"ContainerDied","Data":"56fe1f7cd8dd60310479532b9285921276f7492256bdfb0fe792ca0e0588b026"}
Jan 27 20:59:54 crc kubenswrapper[4793]: I0127 20:59:54.805628 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292"
Jan 27 20:59:54 crc kubenswrapper[4793]: E0127 20:59:54.806115 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 20:59:55 crc kubenswrapper[4793]: I0127 20:59:55.794020 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl"
Jan 27 20:59:55 crc kubenswrapper[4793]: I0127 20:59:55.918389 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/86d391ca-f72f-4332-9d2b-568200608a9e-ssh-key-openstack-edpm-ipam\") pod \"86d391ca-f72f-4332-9d2b-568200608a9e\" (UID: \"86d391ca-f72f-4332-9d2b-568200608a9e\") "
Jan 27 20:59:55 crc kubenswrapper[4793]: I0127 20:59:55.919162 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86d391ca-f72f-4332-9d2b-568200608a9e-inventory\") pod \"86d391ca-f72f-4332-9d2b-568200608a9e\" (UID: \"86d391ca-f72f-4332-9d2b-568200608a9e\") "
Jan 27 20:59:55 crc kubenswrapper[4793]: I0127 20:59:55.919285 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bsrlf\" (UniqueName: \"kubernetes.io/projected/86d391ca-f72f-4332-9d2b-568200608a9e-kube-api-access-bsrlf\") pod \"86d391ca-f72f-4332-9d2b-568200608a9e\" (UID: \"86d391ca-f72f-4332-9d2b-568200608a9e\") "
Jan 27 20:59:55 crc kubenswrapper[4793]: I0127 20:59:55.925189 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86d391ca-f72f-4332-9d2b-568200608a9e-kube-api-access-bsrlf" (OuterVolumeSpecName: "kube-api-access-bsrlf") pod "86d391ca-f72f-4332-9d2b-568200608a9e" (UID: "86d391ca-f72f-4332-9d2b-568200608a9e"). InnerVolumeSpecName "kube-api-access-bsrlf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 20:59:55 crc kubenswrapper[4793]: I0127 20:59:55.946184 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86d391ca-f72f-4332-9d2b-568200608a9e-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "86d391ca-f72f-4332-9d2b-568200608a9e" (UID: "86d391ca-f72f-4332-9d2b-568200608a9e"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:59:55 crc kubenswrapper[4793]: I0127 20:59:55.948088 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86d391ca-f72f-4332-9d2b-568200608a9e-inventory" (OuterVolumeSpecName: "inventory") pod "86d391ca-f72f-4332-9d2b-568200608a9e" (UID: "86d391ca-f72f-4332-9d2b-568200608a9e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.023058 4793 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86d391ca-f72f-4332-9d2b-568200608a9e-inventory\") on node \"crc\" DevicePath \"\""
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.023114 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bsrlf\" (UniqueName: \"kubernetes.io/projected/86d391ca-f72f-4332-9d2b-568200608a9e-kube-api-access-bsrlf\") on node \"crc\" DevicePath \"\""
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.023132 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/86d391ca-f72f-4332-9d2b-568200608a9e-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.285107 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl" event={"ID":"86d391ca-f72f-4332-9d2b-568200608a9e","Type":"ContainerDied","Data":"376e3bda8ccd1ae97748a693407ad4184498d05e2763bb25afbd99c7c2b891c5"}
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.285172 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="376e3bda8ccd1ae97748a693407ad4184498d05e2763bb25afbd99c7c2b891c5"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.285210 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-dgldl"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.582233 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m"]
Jan 27 20:59:56 crc kubenswrapper[4793]: E0127 20:59:56.582756 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86d391ca-f72f-4332-9d2b-568200608a9e" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.582776 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="86d391ca-f72f-4332-9d2b-568200608a9e" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.582985 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="86d391ca-f72f-4332-9d2b-568200608a9e" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.583825 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.589051 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.589338 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.591920 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.592480 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.594056 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m"]
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.636094 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3c67116a-7a1e-4e35-8652-4a453d81e4de-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m\" (UID: \"3c67116a-7a1e-4e35-8652-4a453d81e4de\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.636457 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3c67116a-7a1e-4e35-8652-4a453d81e4de-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m\" (UID: \"3c67116a-7a1e-4e35-8652-4a453d81e4de\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.636596 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctm5d\" (UniqueName: \"kubernetes.io/projected/3c67116a-7a1e-4e35-8652-4a453d81e4de-kube-api-access-ctm5d\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m\" (UID: \"3c67116a-7a1e-4e35-8652-4a453d81e4de\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.738535 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3c67116a-7a1e-4e35-8652-4a453d81e4de-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m\" (UID: \"3c67116a-7a1e-4e35-8652-4a453d81e4de\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.739335 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3c67116a-7a1e-4e35-8652-4a453d81e4de-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m\" (UID: \"3c67116a-7a1e-4e35-8652-4a453d81e4de\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.739430 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctm5d\" (UniqueName: \"kubernetes.io/projected/3c67116a-7a1e-4e35-8652-4a453d81e4de-kube-api-access-ctm5d\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m\" (UID: \"3c67116a-7a1e-4e35-8652-4a453d81e4de\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.742655 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3c67116a-7a1e-4e35-8652-4a453d81e4de-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m\" (UID: \"3c67116a-7a1e-4e35-8652-4a453d81e4de\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.743099 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3c67116a-7a1e-4e35-8652-4a453d81e4de-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m\" (UID: \"3c67116a-7a1e-4e35-8652-4a453d81e4de\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.765796 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctm5d\" (UniqueName: \"kubernetes.io/projected/3c67116a-7a1e-4e35-8652-4a453d81e4de-kube-api-access-ctm5d\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m\" (UID: \"3c67116a-7a1e-4e35-8652-4a453d81e4de\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m"
Jan 27 20:59:56 crc kubenswrapper[4793]: I0127 20:59:56.929350 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m"
Jan 27 20:59:57 crc kubenswrapper[4793]: I0127 20:59:57.588098 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m"]
Jan 27 20:59:57 crc kubenswrapper[4793]: W0127 20:59:57.610616 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c67116a_7a1e_4e35_8652_4a453d81e4de.slice/crio-503a1b600f8307b2e8911ff7dccbe7f191a850ba9f686a72e4c3de225e378602 WatchSource:0}: Error finding container 503a1b600f8307b2e8911ff7dccbe7f191a850ba9f686a72e4c3de225e378602: Status 404 returned error can't find the container with id 503a1b600f8307b2e8911ff7dccbe7f191a850ba9f686a72e4c3de225e378602
Jan 27 20:59:58 crc kubenswrapper[4793]: I0127 20:59:58.304022 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m" event={"ID":"3c67116a-7a1e-4e35-8652-4a453d81e4de","Type":"ContainerStarted","Data":"503a1b600f8307b2e8911ff7dccbe7f191a850ba9f686a72e4c3de225e378602"}
Jan 27 20:59:58 crc kubenswrapper[4793]: I0127 20:59:58.802883 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76"
Jan 27 20:59:59 crc kubenswrapper[4793]: I0127 20:59:59.316416 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m" event={"ID":"3c67116a-7a1e-4e35-8652-4a453d81e4de","Type":"ContainerStarted","Data":"ae7ee0e0b5a77212c637c288cd86d9174e0d90468d67bb4fe13affe1546abb2f"}
Jan 27 20:59:59 crc kubenswrapper[4793]: I0127 20:59:59.320919 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"6841d7e64366c85df7803132fe2e4a8b62672d59f2d2cdd9264dd9d9a7f16f86"}
Jan 27 20:59:59 crc kubenswrapper[4793]: I0127 20:59:59.334098 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m" podStartSLOduration=2.887399424 podStartE2EDuration="3.334070532s" podCreationTimestamp="2026-01-27 20:59:56 +0000 UTC" firstStartedPulling="2026-01-27 20:59:57.613783285 +0000 UTC m=+3423.004036441" lastFinishedPulling="2026-01-27 20:59:58.060454353 +0000 UTC m=+3423.450707549" observedRunningTime="2026-01-27 20:59:59.332670966 +0000 UTC m=+3424.722924132" watchObservedRunningTime="2026-01-27 20:59:59.334070532 +0000 UTC m=+3424.724323688"
Jan 27 21:00:00 crc kubenswrapper[4793]: I0127 21:00:00.138080 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t"]
Jan 27 21:00:00 crc kubenswrapper[4793]: I0127 21:00:00.139835 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t"
Jan 27 21:00:00 crc kubenswrapper[4793]: I0127 21:00:00.142286 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 27 21:00:00 crc kubenswrapper[4793]: I0127 21:00:00.142571 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 27 21:00:00 crc kubenswrapper[4793]: I0127 21:00:00.160257 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t"]
Jan 27 21:00:00 crc kubenswrapper[4793]: I0127 21:00:00.250346 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7d9c191a-345f-48b1-8968-6eb448485a65-secret-volume\") pod \"collect-profiles-29492460-c4r2t\" (UID: \"7d9c191a-345f-48b1-8968-6eb448485a65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t"
Jan 27 21:00:00 crc kubenswrapper[4793]: I0127 21:00:00.250782 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wk6st\" (UniqueName: \"kubernetes.io/projected/7d9c191a-345f-48b1-8968-6eb448485a65-kube-api-access-wk6st\") pod \"collect-profiles-29492460-c4r2t\" (UID: \"7d9c191a-345f-48b1-8968-6eb448485a65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t"
Jan 27 21:00:00 crc kubenswrapper[4793]: I0127 21:00:00.250939 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7d9c191a-345f-48b1-8968-6eb448485a65-config-volume\") pod \"collect-profiles-29492460-c4r2t\" (UID: \"7d9c191a-345f-48b1-8968-6eb448485a65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t"
Jan 27 21:00:00 crc kubenswrapper[4793]: I0127 21:00:00.366358 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7d9c191a-345f-48b1-8968-6eb448485a65-config-volume\") pod \"collect-profiles-29492460-c4r2t\" (UID: \"7d9c191a-345f-48b1-8968-6eb448485a65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t"
Jan 27 21:00:00 crc kubenswrapper[4793]: I0127 21:00:00.366564 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7d9c191a-345f-48b1-8968-6eb448485a65-secret-volume\") pod \"collect-profiles-29492460-c4r2t\" (UID: \"7d9c191a-345f-48b1-8968-6eb448485a65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t"
Jan 27 21:00:00 crc kubenswrapper[4793]: I0127 21:00:00.366610 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wk6st\" (UniqueName: \"kubernetes.io/projected/7d9c191a-345f-48b1-8968-6eb448485a65-kube-api-access-wk6st\") pod \"collect-profiles-29492460-c4r2t\" (UID: \"7d9c191a-345f-48b1-8968-6eb448485a65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t"
Jan 27 21:00:00 crc kubenswrapper[4793]: I0127 21:00:00.367665 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7d9c191a-345f-48b1-8968-6eb448485a65-config-volume\") pod \"collect-profiles-29492460-c4r2t\" (UID: \"7d9c191a-345f-48b1-8968-6eb448485a65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t"
Jan 27 21:00:00 crc kubenswrapper[4793]: I0127 21:00:00.374206 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7d9c191a-345f-48b1-8968-6eb448485a65-secret-volume\") pod \"collect-profiles-29492460-c4r2t\" (UID: \"7d9c191a-345f-48b1-8968-6eb448485a65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t"
Jan 27 21:00:00 crc kubenswrapper[4793]: I0127 21:00:00.409338 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wk6st\" (UniqueName: \"kubernetes.io/projected/7d9c191a-345f-48b1-8968-6eb448485a65-kube-api-access-wk6st\") pod \"collect-profiles-29492460-c4r2t\" (UID: \"7d9c191a-345f-48b1-8968-6eb448485a65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t"
Jan 27 21:00:00 crc kubenswrapper[4793]: I0127 21:00:00.554094 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t"
Jan 27 21:00:01 crc kubenswrapper[4793]: I0127 21:00:01.063759 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t"]
Jan 27 21:00:01 crc kubenswrapper[4793]: W0127 21:00:01.070192 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7d9c191a_345f_48b1_8968_6eb448485a65.slice/crio-5c45c9b680507e64d23d64ceb155dd6fdc9e08e4b1db603ed26f977e27b3e233 WatchSource:0}: Error finding container 5c45c9b680507e64d23d64ceb155dd6fdc9e08e4b1db603ed26f977e27b3e233: Status 404 returned error can't find the container with id 5c45c9b680507e64d23d64ceb155dd6fdc9e08e4b1db603ed26f977e27b3e233
Jan 27 21:00:01 crc kubenswrapper[4793]: I0127 21:00:01.338156 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t" event={"ID":"7d9c191a-345f-48b1-8968-6eb448485a65","Type":"ContainerStarted","Data":"232b60b26dcd2c979f7c2a17d3a909ca5338a10cada79794668c9de3ebf219c8"}
Jan 27 21:00:01 crc kubenswrapper[4793]: I0127 21:00:01.338399 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t" event={"ID":"7d9c191a-345f-48b1-8968-6eb448485a65","Type":"ContainerStarted","Data":"5c45c9b680507e64d23d64ceb155dd6fdc9e08e4b1db603ed26f977e27b3e233"}
Jan 27 21:00:01 crc kubenswrapper[4793]: I0127 21:00:01.366473 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t" podStartSLOduration=1.366446421 podStartE2EDuration="1.366446421s" podCreationTimestamp="2026-01-27 21:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 21:00:01.353759719 +0000 UTC m=+3426.744012885" watchObservedRunningTime="2026-01-27 21:00:01.366446421 +0000 UTC m=+3426.756699577"
Jan 27 21:00:02 crc kubenswrapper[4793]: I0127 21:00:02.362973 4793 generic.go:334] "Generic (PLEG): container finished" podID="7d9c191a-345f-48b1-8968-6eb448485a65" containerID="232b60b26dcd2c979f7c2a17d3a909ca5338a10cada79794668c9de3ebf219c8" exitCode=0
Jan 27 21:00:02 crc kubenswrapper[4793]: I0127 21:00:02.363528 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t" event={"ID":"7d9c191a-345f-48b1-8968-6eb448485a65","Type":"ContainerDied","Data":"232b60b26dcd2c979f7c2a17d3a909ca5338a10cada79794668c9de3ebf219c8"}
Jan 27 21:00:03 crc kubenswrapper[4793]: I0127 21:00:03.794886 4793 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t" Jan 27 21:00:03 crc kubenswrapper[4793]: I0127 21:00:03.980324 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7d9c191a-345f-48b1-8968-6eb448485a65-config-volume\") pod \"7d9c191a-345f-48b1-8968-6eb448485a65\" (UID: \"7d9c191a-345f-48b1-8968-6eb448485a65\") " Jan 27 21:00:03 crc kubenswrapper[4793]: I0127 21:00:03.980731 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wk6st\" (UniqueName: \"kubernetes.io/projected/7d9c191a-345f-48b1-8968-6eb448485a65-kube-api-access-wk6st\") pod \"7d9c191a-345f-48b1-8968-6eb448485a65\" (UID: \"7d9c191a-345f-48b1-8968-6eb448485a65\") " Jan 27 21:00:03 crc kubenswrapper[4793]: I0127 21:00:03.980796 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7d9c191a-345f-48b1-8968-6eb448485a65-secret-volume\") pod \"7d9c191a-345f-48b1-8968-6eb448485a65\" (UID: \"7d9c191a-345f-48b1-8968-6eb448485a65\") " Jan 27 21:00:03 crc kubenswrapper[4793]: I0127 21:00:03.981016 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d9c191a-345f-48b1-8968-6eb448485a65-config-volume" (OuterVolumeSpecName: "config-volume") pod "7d9c191a-345f-48b1-8968-6eb448485a65" (UID: "7d9c191a-345f-48b1-8968-6eb448485a65"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 21:00:03 crc kubenswrapper[4793]: I0127 21:00:03.981489 4793 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7d9c191a-345f-48b1-8968-6eb448485a65-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:03 crc kubenswrapper[4793]: I0127 21:00:03.986728 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d9c191a-345f-48b1-8968-6eb448485a65-kube-api-access-wk6st" (OuterVolumeSpecName: "kube-api-access-wk6st") pod "7d9c191a-345f-48b1-8968-6eb448485a65" (UID: "7d9c191a-345f-48b1-8968-6eb448485a65"). InnerVolumeSpecName "kube-api-access-wk6st". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:00:03 crc kubenswrapper[4793]: I0127 21:00:03.990334 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d9c191a-345f-48b1-8968-6eb448485a65-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7d9c191a-345f-48b1-8968-6eb448485a65" (UID: "7d9c191a-345f-48b1-8968-6eb448485a65"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:00:04 crc kubenswrapper[4793]: I0127 21:00:04.084495 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wk6st\" (UniqueName: \"kubernetes.io/projected/7d9c191a-345f-48b1-8968-6eb448485a65-kube-api-access-wk6st\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:04 crc kubenswrapper[4793]: I0127 21:00:04.084540 4793 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7d9c191a-345f-48b1-8968-6eb448485a65-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:04 crc kubenswrapper[4793]: I0127 21:00:04.381492 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t" event={"ID":"7d9c191a-345f-48b1-8968-6eb448485a65","Type":"ContainerDied","Data":"5c45c9b680507e64d23d64ceb155dd6fdc9e08e4b1db603ed26f977e27b3e233"} Jan 27 21:00:04 crc kubenswrapper[4793]: I0127 21:00:04.381583 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t" Jan 27 21:00:04 crc kubenswrapper[4793]: I0127 21:00:04.381608 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c45c9b680507e64d23d64ceb155dd6fdc9e08e4b1db603ed26f977e27b3e233" Jan 27 21:00:04 crc kubenswrapper[4793]: I0127 21:00:04.438371 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d"] Jan 27 21:00:04 crc kubenswrapper[4793]: I0127 21:00:04.447455 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492415-4nh4d"] Jan 27 21:00:05 crc kubenswrapper[4793]: I0127 21:00:05.815418 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c25caa3-6c82-4cf8-b868-bafec109e3ea" path="/var/lib/kubelet/pods/8c25caa3-6c82-4cf8-b868-bafec109e3ea/volumes" Jan 27 21:00:08 crc kubenswrapper[4793]: I0127 21:00:08.804178 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:00:08 crc kubenswrapper[4793]: E0127 21:00:08.804741 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:00:09 crc kubenswrapper[4793]: I0127 21:00:09.429958 4793 generic.go:334] "Generic (PLEG): container finished" podID="3c67116a-7a1e-4e35-8652-4a453d81e4de" containerID="ae7ee0e0b5a77212c637c288cd86d9174e0d90468d67bb4fe13affe1546abb2f" exitCode=0 Jan 27 21:00:09 crc kubenswrapper[4793]: I0127 21:00:09.430049 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m" event={"ID":"3c67116a-7a1e-4e35-8652-4a453d81e4de","Type":"ContainerDied","Data":"ae7ee0e0b5a77212c637c288cd86d9174e0d90468d67bb4fe13affe1546abb2f"} Jan 27 21:00:10 crc kubenswrapper[4793]: I0127 21:00:10.869842 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m" Jan 27 21:00:10 crc kubenswrapper[4793]: I0127 21:00:10.919591 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3c67116a-7a1e-4e35-8652-4a453d81e4de-ssh-key-openstack-edpm-ipam\") pod \"3c67116a-7a1e-4e35-8652-4a453d81e4de\" (UID: \"3c67116a-7a1e-4e35-8652-4a453d81e4de\") " Jan 27 21:00:10 crc kubenswrapper[4793]: I0127 21:00:10.919936 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctm5d\" (UniqueName: \"kubernetes.io/projected/3c67116a-7a1e-4e35-8652-4a453d81e4de-kube-api-access-ctm5d\") pod \"3c67116a-7a1e-4e35-8652-4a453d81e4de\" (UID: \"3c67116a-7a1e-4e35-8652-4a453d81e4de\") " Jan 27 21:00:10 crc kubenswrapper[4793]: I0127 21:00:10.920059 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3c67116a-7a1e-4e35-8652-4a453d81e4de-inventory\") pod \"3c67116a-7a1e-4e35-8652-4a453d81e4de\" (UID: \"3c67116a-7a1e-4e35-8652-4a453d81e4de\") " Jan 27 21:00:10 crc kubenswrapper[4793]: I0127 21:00:10.935849 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c67116a-7a1e-4e35-8652-4a453d81e4de-kube-api-access-ctm5d" (OuterVolumeSpecName: "kube-api-access-ctm5d") pod "3c67116a-7a1e-4e35-8652-4a453d81e4de" (UID: "3c67116a-7a1e-4e35-8652-4a453d81e4de"). InnerVolumeSpecName "kube-api-access-ctm5d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:00:10 crc kubenswrapper[4793]: I0127 21:00:10.951326 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c67116a-7a1e-4e35-8652-4a453d81e4de-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "3c67116a-7a1e-4e35-8652-4a453d81e4de" (UID: "3c67116a-7a1e-4e35-8652-4a453d81e4de"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:00:10 crc kubenswrapper[4793]: I0127 21:00:10.965791 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c67116a-7a1e-4e35-8652-4a453d81e4de-inventory" (OuterVolumeSpecName: "inventory") pod "3c67116a-7a1e-4e35-8652-4a453d81e4de" (UID: "3c67116a-7a1e-4e35-8652-4a453d81e4de"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.022177 4793 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3c67116a-7a1e-4e35-8652-4a453d81e4de-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.022210 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3c67116a-7a1e-4e35-8652-4a453d81e4de-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.022222 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctm5d\" (UniqueName: \"kubernetes.io/projected/3c67116a-7a1e-4e35-8652-4a453d81e4de-kube-api-access-ctm5d\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.448954 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m" event={"ID":"3c67116a-7a1e-4e35-8652-4a453d81e4de","Type":"ContainerDied","Data":"503a1b600f8307b2e8911ff7dccbe7f191a850ba9f686a72e4c3de225e378602"} Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.449089 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="503a1b600f8307b2e8911ff7dccbe7f191a850ba9f686a72e4c3de225e378602" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.448995 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.565354 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f"] Jan 27 21:00:11 crc kubenswrapper[4793]: E0127 21:00:11.566034 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c67116a-7a1e-4e35-8652-4a453d81e4de" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.566178 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c67116a-7a1e-4e35-8652-4a453d81e4de" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 27 21:00:11 crc kubenswrapper[4793]: E0127 21:00:11.566262 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d9c191a-345f-48b1-8968-6eb448485a65" containerName="collect-profiles" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.566331 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d9c191a-345f-48b1-8968-6eb448485a65" containerName="collect-profiles" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.566622 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c67116a-7a1e-4e35-8652-4a453d81e4de" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.566708 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d9c191a-345f-48b1-8968-6eb448485a65" containerName="collect-profiles" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.567410 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.570303 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.570854 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.571389 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.571452 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.571471 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.572125 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.572403 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.572497 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.584430 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f"] Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.634941 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.635053 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.635103 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.635129 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-telemetry-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.635180 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.635232 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.635253 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.635272 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.635298 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.635316 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.635337 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-telemetry-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.635355 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.635379 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.635415 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csc4f\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-kube-api-access-csc4f\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.737358 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.737744 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.737848 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.737970 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: 
I0127 21:00:11.738074 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.738190 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.738273 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.738349 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.738426 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.738501 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.738595 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.738807 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.739203 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.739344 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csc4f\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-kube-api-access-csc4f\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.742341 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.743704 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.744128 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.746877 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.746903 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.750183 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.750930 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.750955 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.751690 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.752581 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.752609 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.754521 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.758328 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.760256 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csc4f\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-kube-api-access-csc4f\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-h785f\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:11 crc kubenswrapper[4793]: I0127 21:00:11.891287 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:12 crc kubenswrapper[4793]: I0127 21:00:12.514097 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f"] Jan 27 21:00:12 crc kubenswrapper[4793]: W0127 21:00:12.524209 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod03785202_a2ac_4a5d_a761_40636c332578.slice/crio-bb511d7d7f54dbf3258e84aa123af444ba732d9748618c71c7a52abba0d45163 WatchSource:0}: Error finding container bb511d7d7f54dbf3258e84aa123af444ba732d9748618c71c7a52abba0d45163: Status 404 returned error can't find the container with id bb511d7d7f54dbf3258e84aa123af444ba732d9748618c71c7a52abba0d45163 Jan 27 21:00:13 crc kubenswrapper[4793]: I0127 21:00:13.472852 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" event={"ID":"03785202-a2ac-4a5d-a761-40636c332578","Type":"ContainerStarted","Data":"e549ecc042cdbace5975966a5c192eb749444d67feecb329e396d0bb66f14f60"} Jan 27 21:00:13 crc kubenswrapper[4793]: I0127 21:00:13.473144 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" event={"ID":"03785202-a2ac-4a5d-a761-40636c332578","Type":"ContainerStarted","Data":"bb511d7d7f54dbf3258e84aa123af444ba732d9748618c71c7a52abba0d45163"} Jan 27 21:00:13 crc kubenswrapper[4793]: I0127 21:00:13.495445 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" podStartSLOduration=2.089245258 podStartE2EDuration="2.495412808s" podCreationTimestamp="2026-01-27 21:00:11 +0000 UTC" firstStartedPulling="2026-01-27 21:00:12.528169566 +0000 UTC m=+3437.918422722" lastFinishedPulling="2026-01-27 21:00:12.934337116 +0000 UTC m=+3438.324590272" observedRunningTime="2026-01-27 21:00:13.488901273 +0000 UTC m=+3438.879154439" watchObservedRunningTime="2026-01-27 21:00:13.495412808 +0000 UTC m=+3438.885665994" Jan 27 21:00:21 crc kubenswrapper[4793]: I0127 21:00:21.803523 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:00:21 crc kubenswrapper[4793]: E0127 21:00:21.804405 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" 
podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:00:32 crc kubenswrapper[4793]: I0127 21:00:32.802933 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:00:32 crc kubenswrapper[4793]: E0127 21:00:32.803726 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:00:35 crc kubenswrapper[4793]: I0127 21:00:35.643233 4793 scope.go:117] "RemoveContainer" containerID="29b15860166ba63defcc518b77ae7202526164d84e4d3648790c3b16ee6cc210" Jan 27 21:00:46 crc kubenswrapper[4793]: I0127 21:00:46.803288 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:00:46 crc kubenswrapper[4793]: E0127 21:00:46.804257 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:00:54 crc kubenswrapper[4793]: I0127 21:00:54.593219 4793 generic.go:334] "Generic (PLEG): container finished" podID="03785202-a2ac-4a5d-a761-40636c332578" containerID="e549ecc042cdbace5975966a5c192eb749444d67feecb329e396d0bb66f14f60" exitCode=0 Jan 27 21:00:54 crc kubenswrapper[4793]: I0127 21:00:54.593295 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" event={"ID":"03785202-a2ac-4a5d-a761-40636c332578","Type":"ContainerDied","Data":"e549ecc042cdbace5975966a5c192eb749444d67feecb329e396d0bb66f14f60"} Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.133674 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.178775 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"03785202-a2ac-4a5d-a761-40636c332578\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.178902 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-csc4f\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-kube-api-access-csc4f\") pod \"03785202-a2ac-4a5d-a761-40636c332578\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.178966 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-bootstrap-combined-ca-bundle\") pod \"03785202-a2ac-4a5d-a761-40636c332578\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.179030 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-telemetry-combined-ca-bundle\") pod \"03785202-a2ac-4a5d-a761-40636c332578\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.186787 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-kube-api-access-csc4f" (OuterVolumeSpecName: "kube-api-access-csc4f") pod "03785202-a2ac-4a5d-a761-40636c332578" (UID: "03785202-a2ac-4a5d-a761-40636c332578"). InnerVolumeSpecName "kube-api-access-csc4f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.187847 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "03785202-a2ac-4a5d-a761-40636c332578" (UID: "03785202-a2ac-4a5d-a761-40636c332578"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.188065 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "03785202-a2ac-4a5d-a761-40636c332578" (UID: "03785202-a2ac-4a5d-a761-40636c332578"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.194260 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "03785202-a2ac-4a5d-a761-40636c332578" (UID: "03785202-a2ac-4a5d-a761-40636c332578"). InnerVolumeSpecName "telemetry-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.281217 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-ovn-default-certs-0\") pod \"03785202-a2ac-4a5d-a761-40636c332578\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.281691 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-repo-setup-combined-ca-bundle\") pod \"03785202-a2ac-4a5d-a761-40636c332578\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.281917 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"03785202-a2ac-4a5d-a761-40636c332578\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.282073 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-ovn-combined-ca-bundle\") pod \"03785202-a2ac-4a5d-a761-40636c332578\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.282249 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-ssh-key-openstack-edpm-ipam\") pod \"03785202-a2ac-4a5d-a761-40636c332578\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.282387 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-neutron-metadata-combined-ca-bundle\") pod \"03785202-a2ac-4a5d-a761-40636c332578\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.282516 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-nova-combined-ca-bundle\") pod \"03785202-a2ac-4a5d-a761-40636c332578\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.282693 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-inventory\") pod \"03785202-a2ac-4a5d-a761-40636c332578\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.283140 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"03785202-a2ac-4a5d-a761-40636c332578\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " Jan 27 21:00:56 crc 
kubenswrapper[4793]: I0127 21:00:56.283327 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-libvirt-combined-ca-bundle\") pod \"03785202-a2ac-4a5d-a761-40636c332578\" (UID: \"03785202-a2ac-4a5d-a761-40636c332578\") " Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.284199 4793 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.284374 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-csc4f\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-kube-api-access-csc4f\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.284462 4793 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.284575 4793 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.286585 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "03785202-a2ac-4a5d-a761-40636c332578" (UID: "03785202-a2ac-4a5d-a761-40636c332578"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.288161 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "03785202-a2ac-4a5d-a761-40636c332578" (UID: "03785202-a2ac-4a5d-a761-40636c332578"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.288328 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "03785202-a2ac-4a5d-a761-40636c332578" (UID: "03785202-a2ac-4a5d-a761-40636c332578"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.290324 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "03785202-a2ac-4a5d-a761-40636c332578" (UID: "03785202-a2ac-4a5d-a761-40636c332578"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.291821 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "03785202-a2ac-4a5d-a761-40636c332578" (UID: "03785202-a2ac-4a5d-a761-40636c332578"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.292317 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "03785202-a2ac-4a5d-a761-40636c332578" (UID: "03785202-a2ac-4a5d-a761-40636c332578"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.293640 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "03785202-a2ac-4a5d-a761-40636c332578" (UID: "03785202-a2ac-4a5d-a761-40636c332578"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.297312 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "03785202-a2ac-4a5d-a761-40636c332578" (UID: "03785202-a2ac-4a5d-a761-40636c332578"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.318355 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-inventory" (OuterVolumeSpecName: "inventory") pod "03785202-a2ac-4a5d-a761-40636c332578" (UID: "03785202-a2ac-4a5d-a761-40636c332578"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.324597 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "03785202-a2ac-4a5d-a761-40636c332578" (UID: "03785202-a2ac-4a5d-a761-40636c332578"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.386604 4793 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.386644 4793 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.386663 4793 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.386675 4793 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.386684 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.386692 4793 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.386703 4793 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.386713 4793 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.386720 4793 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/03785202-a2ac-4a5d-a761-40636c332578-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.386732 4793 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03785202-a2ac-4a5d-a761-40636c332578-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.660890 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" event={"ID":"03785202-a2ac-4a5d-a761-40636c332578","Type":"ContainerDied","Data":"bb511d7d7f54dbf3258e84aa123af444ba732d9748618c71c7a52abba0d45163"} Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.661169 4793 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="bb511d7d7f54dbf3258e84aa123af444ba732d9748618c71c7a52abba0d45163" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.660947 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-h785f" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.764311 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv"] Jan 27 21:00:56 crc kubenswrapper[4793]: E0127 21:00:56.764725 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03785202-a2ac-4a5d-a761-40636c332578" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.764743 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="03785202-a2ac-4a5d-a761-40636c332578" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.764944 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="03785202-a2ac-4a5d-a761-40636c332578" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.765770 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.769260 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.769450 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.769270 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.769356 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.773635 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv"] Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.774766 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.798268 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/f120b137-90a3-45e8-946f-5d32e682696a-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vbrjv\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.798770 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vbrjv\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.798947 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" 
(UniqueName: \"kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vbrjv\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.799297 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vbrjv\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.799417 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrdk9\" (UniqueName: \"kubernetes.io/projected/f120b137-90a3-45e8-946f-5d32e682696a-kube-api-access-nrdk9\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vbrjv\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.902058 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vbrjv\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.902153 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrdk9\" (UniqueName: \"kubernetes.io/projected/f120b137-90a3-45e8-946f-5d32e682696a-kube-api-access-nrdk9\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vbrjv\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.902241 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/f120b137-90a3-45e8-946f-5d32e682696a-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vbrjv\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.902308 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vbrjv\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.902374 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vbrjv\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.903333 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: 
\"kubernetes.io/configmap/f120b137-90a3-45e8-946f-5d32e682696a-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vbrjv\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.906821 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vbrjv\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.906915 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vbrjv\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.907078 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vbrjv\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:56 crc kubenswrapper[4793]: I0127 21:00:56.920383 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrdk9\" (UniqueName: \"kubernetes.io/projected/f120b137-90a3-45e8-946f-5d32e682696a-kube-api-access-nrdk9\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vbrjv\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:57 crc kubenswrapper[4793]: I0127 21:00:57.093027 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:00:57 crc kubenswrapper[4793]: I0127 21:00:57.611136 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv"] Jan 27 21:00:57 crc kubenswrapper[4793]: I0127 21:00:57.621265 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 21:00:57 crc kubenswrapper[4793]: I0127 21:00:57.673045 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" event={"ID":"f120b137-90a3-45e8-946f-5d32e682696a","Type":"ContainerStarted","Data":"eaa1cb545010c8cf106ab8a37c86f9d2c69072c924937398753ae4b71364b37c"} Jan 27 21:00:58 crc kubenswrapper[4793]: I0127 21:00:58.784428 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" event={"ID":"f120b137-90a3-45e8-946f-5d32e682696a","Type":"ContainerStarted","Data":"03045f34cb41c1193e5c8839363b43bd72168c742fa751966fc67337154bf9ea"} Jan 27 21:00:58 crc kubenswrapper[4793]: I0127 21:00:58.808120 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" podStartSLOduration=2.374899168 podStartE2EDuration="2.808098354s" podCreationTimestamp="2026-01-27 21:00:56 +0000 UTC" firstStartedPulling="2026-01-27 21:00:57.62092939 +0000 UTC m=+3483.011182546" lastFinishedPulling="2026-01-27 21:00:58.054128576 +0000 UTC m=+3483.444381732" observedRunningTime="2026-01-27 21:00:58.798805269 +0000 UTC m=+3484.189058435" watchObservedRunningTime="2026-01-27 21:00:58.808098354 +0000 UTC m=+3484.198351510" Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.137825 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29492461-zkrn5"] Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.140876 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29492461-zkrn5" Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.154197 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29492461-zkrn5"] Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.306043 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-fernet-keys\") pod \"keystone-cron-29492461-zkrn5\" (UID: \"42f58700-9ffd-4ba4-806d-13d345c8923c\") " pod="openstack/keystone-cron-29492461-zkrn5" Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.306132 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-combined-ca-bundle\") pod \"keystone-cron-29492461-zkrn5\" (UID: \"42f58700-9ffd-4ba4-806d-13d345c8923c\") " pod="openstack/keystone-cron-29492461-zkrn5" Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.306368 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9qt2\" (UniqueName: \"kubernetes.io/projected/42f58700-9ffd-4ba4-806d-13d345c8923c-kube-api-access-x9qt2\") pod \"keystone-cron-29492461-zkrn5\" (UID: \"42f58700-9ffd-4ba4-806d-13d345c8923c\") " pod="openstack/keystone-cron-29492461-zkrn5" Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.306426 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-config-data\") pod \"keystone-cron-29492461-zkrn5\" (UID: \"42f58700-9ffd-4ba4-806d-13d345c8923c\") " pod="openstack/keystone-cron-29492461-zkrn5" Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.408057 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9qt2\" (UniqueName: \"kubernetes.io/projected/42f58700-9ffd-4ba4-806d-13d345c8923c-kube-api-access-x9qt2\") pod \"keystone-cron-29492461-zkrn5\" (UID: \"42f58700-9ffd-4ba4-806d-13d345c8923c\") " pod="openstack/keystone-cron-29492461-zkrn5" Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.408123 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-config-data\") pod \"keystone-cron-29492461-zkrn5\" (UID: \"42f58700-9ffd-4ba4-806d-13d345c8923c\") " pod="openstack/keystone-cron-29492461-zkrn5" Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.408214 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-fernet-keys\") pod \"keystone-cron-29492461-zkrn5\" (UID: \"42f58700-9ffd-4ba4-806d-13d345c8923c\") " pod="openstack/keystone-cron-29492461-zkrn5" Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.408264 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-combined-ca-bundle\") pod \"keystone-cron-29492461-zkrn5\" (UID: \"42f58700-9ffd-4ba4-806d-13d345c8923c\") " pod="openstack/keystone-cron-29492461-zkrn5" Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.415912 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-combined-ca-bundle\") pod \"keystone-cron-29492461-zkrn5\" (UID: \"42f58700-9ffd-4ba4-806d-13d345c8923c\") " pod="openstack/keystone-cron-29492461-zkrn5" Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.416076 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-fernet-keys\") pod \"keystone-cron-29492461-zkrn5\" (UID: \"42f58700-9ffd-4ba4-806d-13d345c8923c\") " pod="openstack/keystone-cron-29492461-zkrn5" Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.416919 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-config-data\") pod \"keystone-cron-29492461-zkrn5\" (UID: \"42f58700-9ffd-4ba4-806d-13d345c8923c\") " pod="openstack/keystone-cron-29492461-zkrn5" Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.443943 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9qt2\" (UniqueName: \"kubernetes.io/projected/42f58700-9ffd-4ba4-806d-13d345c8923c-kube-api-access-x9qt2\") pod \"keystone-cron-29492461-zkrn5\" (UID: \"42f58700-9ffd-4ba4-806d-13d345c8923c\") " pod="openstack/keystone-cron-29492461-zkrn5" Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.470957 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29492461-zkrn5" Jan 27 21:01:00 crc kubenswrapper[4793]: I0127 21:01:00.931636 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29492461-zkrn5"] Jan 27 21:01:00 crc kubenswrapper[4793]: W0127 21:01:00.948105 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod42f58700_9ffd_4ba4_806d_13d345c8923c.slice/crio-ad4cbfe6638d17e0703f951fe8e1bce14201fe33030a4d93d350618233dab71e WatchSource:0}: Error finding container ad4cbfe6638d17e0703f951fe8e1bce14201fe33030a4d93d350618233dab71e: Status 404 returned error can't find the container with id ad4cbfe6638d17e0703f951fe8e1bce14201fe33030a4d93d350618233dab71e Jan 27 21:01:01 crc kubenswrapper[4793]: I0127 21:01:01.804395 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:01:01 crc kubenswrapper[4793]: E0127 21:01:01.811803 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:01:01 crc kubenswrapper[4793]: I0127 21:01:01.830616 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29492461-zkrn5" event={"ID":"42f58700-9ffd-4ba4-806d-13d345c8923c","Type":"ContainerStarted","Data":"a689e1942c2e738dbd5ab275acbf7bf929b6ddfc9671f5bc921707689488f7cf"} Jan 27 21:01:01 crc kubenswrapper[4793]: I0127 21:01:01.830655 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29492461-zkrn5" event={"ID":"42f58700-9ffd-4ba4-806d-13d345c8923c","Type":"ContainerStarted","Data":"ad4cbfe6638d17e0703f951fe8e1bce14201fe33030a4d93d350618233dab71e"} Jan 27 21:01:01 crc kubenswrapper[4793]: I0127 
21:01:01.854293 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29492461-zkrn5" podStartSLOduration=1.8542734269999999 podStartE2EDuration="1.854273427s" podCreationTimestamp="2026-01-27 21:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 21:01:01.850277686 +0000 UTC m=+3487.240530842" watchObservedRunningTime="2026-01-27 21:01:01.854273427 +0000 UTC m=+3487.244526584" Jan 27 21:01:04 crc kubenswrapper[4793]: I0127 21:01:04.862411 4793 generic.go:334] "Generic (PLEG): container finished" podID="42f58700-9ffd-4ba4-806d-13d345c8923c" containerID="a689e1942c2e738dbd5ab275acbf7bf929b6ddfc9671f5bc921707689488f7cf" exitCode=0 Jan 27 21:01:04 crc kubenswrapper[4793]: I0127 21:01:04.862565 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29492461-zkrn5" event={"ID":"42f58700-9ffd-4ba4-806d-13d345c8923c","Type":"ContainerDied","Data":"a689e1942c2e738dbd5ab275acbf7bf929b6ddfc9671f5bc921707689488f7cf"} Jan 27 21:01:06 crc kubenswrapper[4793]: I0127 21:01:06.199616 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29492461-zkrn5" Jan 27 21:01:06 crc kubenswrapper[4793]: I0127 21:01:06.227131 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x9qt2\" (UniqueName: \"kubernetes.io/projected/42f58700-9ffd-4ba4-806d-13d345c8923c-kube-api-access-x9qt2\") pod \"42f58700-9ffd-4ba4-806d-13d345c8923c\" (UID: \"42f58700-9ffd-4ba4-806d-13d345c8923c\") " Jan 27 21:01:06 crc kubenswrapper[4793]: I0127 21:01:06.227249 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-combined-ca-bundle\") pod \"42f58700-9ffd-4ba4-806d-13d345c8923c\" (UID: \"42f58700-9ffd-4ba4-806d-13d345c8923c\") " Jan 27 21:01:06 crc kubenswrapper[4793]: I0127 21:01:06.227275 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-config-data\") pod \"42f58700-9ffd-4ba4-806d-13d345c8923c\" (UID: \"42f58700-9ffd-4ba4-806d-13d345c8923c\") " Jan 27 21:01:06 crc kubenswrapper[4793]: I0127 21:01:06.227410 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-fernet-keys\") pod \"42f58700-9ffd-4ba4-806d-13d345c8923c\" (UID: \"42f58700-9ffd-4ba4-806d-13d345c8923c\") " Jan 27 21:01:06 crc kubenswrapper[4793]: I0127 21:01:06.233370 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42f58700-9ffd-4ba4-806d-13d345c8923c-kube-api-access-x9qt2" (OuterVolumeSpecName: "kube-api-access-x9qt2") pod "42f58700-9ffd-4ba4-806d-13d345c8923c" (UID: "42f58700-9ffd-4ba4-806d-13d345c8923c"). InnerVolumeSpecName "kube-api-access-x9qt2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:01:06 crc kubenswrapper[4793]: I0127 21:01:06.233679 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "42f58700-9ffd-4ba4-806d-13d345c8923c" (UID: "42f58700-9ffd-4ba4-806d-13d345c8923c"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:01:06 crc kubenswrapper[4793]: I0127 21:01:06.259256 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "42f58700-9ffd-4ba4-806d-13d345c8923c" (UID: "42f58700-9ffd-4ba4-806d-13d345c8923c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:01:06 crc kubenswrapper[4793]: I0127 21:01:06.282688 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-config-data" (OuterVolumeSpecName: "config-data") pod "42f58700-9ffd-4ba4-806d-13d345c8923c" (UID: "42f58700-9ffd-4ba4-806d-13d345c8923c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:01:06 crc kubenswrapper[4793]: I0127 21:01:06.330913 4793 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 27 21:01:06 crc kubenswrapper[4793]: I0127 21:01:06.330954 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x9qt2\" (UniqueName: \"kubernetes.io/projected/42f58700-9ffd-4ba4-806d-13d345c8923c-kube-api-access-x9qt2\") on node \"crc\" DevicePath \"\"" Jan 27 21:01:06 crc kubenswrapper[4793]: I0127 21:01:06.330969 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 21:01:06 crc kubenswrapper[4793]: I0127 21:01:06.330980 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42f58700-9ffd-4ba4-806d-13d345c8923c-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 21:01:06 crc kubenswrapper[4793]: I0127 21:01:06.880750 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29492461-zkrn5" event={"ID":"42f58700-9ffd-4ba4-806d-13d345c8923c","Type":"ContainerDied","Data":"ad4cbfe6638d17e0703f951fe8e1bce14201fe33030a4d93d350618233dab71e"} Jan 27 21:01:06 crc kubenswrapper[4793]: I0127 21:01:06.881086 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad4cbfe6638d17e0703f951fe8e1bce14201fe33030a4d93d350618233dab71e" Jan 27 21:01:06 crc kubenswrapper[4793]: I0127 21:01:06.880799 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29492461-zkrn5" Jan 27 21:01:14 crc kubenswrapper[4793]: I0127 21:01:14.803773 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:01:14 crc kubenswrapper[4793]: E0127 21:01:14.804716 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:01:26 crc kubenswrapper[4793]: I0127 21:01:26.804086 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:01:26 crc kubenswrapper[4793]: E0127 21:01:26.805008 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:01:37 crc kubenswrapper[4793]: I0127 21:01:37.804275 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:01:37 crc kubenswrapper[4793]: E0127 21:01:37.806701 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:01:51 crc kubenswrapper[4793]: I0127 21:01:51.803808 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:01:51 crc kubenswrapper[4793]: E0127 21:01:51.804653 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:02:03 crc kubenswrapper[4793]: I0127 21:02:03.804234 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:02:03 crc kubenswrapper[4793]: E0127 21:02:03.804930 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:02:12 crc kubenswrapper[4793]: I0127 21:02:12.280594 4793 generic.go:334] "Generic (PLEG): container finished" podID="f120b137-90a3-45e8-946f-5d32e682696a" containerID="03045f34cb41c1193e5c8839363b43bd72168c742fa751966fc67337154bf9ea" exitCode=0 Jan 27 21:02:12 crc kubenswrapper[4793]: I0127 21:02:12.280712 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" 
event={"ID":"f120b137-90a3-45e8-946f-5d32e682696a","Type":"ContainerDied","Data":"03045f34cb41c1193e5c8839363b43bd72168c742fa751966fc67337154bf9ea"} Jan 27 21:02:13 crc kubenswrapper[4793]: I0127 21:02:13.782212 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:02:13 crc kubenswrapper[4793]: I0127 21:02:13.908280 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/f120b137-90a3-45e8-946f-5d32e682696a-ovncontroller-config-0\") pod \"f120b137-90a3-45e8-946f-5d32e682696a\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " Jan 27 21:02:13 crc kubenswrapper[4793]: I0127 21:02:13.908348 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-ovn-combined-ca-bundle\") pod \"f120b137-90a3-45e8-946f-5d32e682696a\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " Jan 27 21:02:13 crc kubenswrapper[4793]: I0127 21:02:13.908476 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-inventory\") pod \"f120b137-90a3-45e8-946f-5d32e682696a\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " Jan 27 21:02:13 crc kubenswrapper[4793]: I0127 21:02:13.908572 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrdk9\" (UniqueName: \"kubernetes.io/projected/f120b137-90a3-45e8-946f-5d32e682696a-kube-api-access-nrdk9\") pod \"f120b137-90a3-45e8-946f-5d32e682696a\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " Jan 27 21:02:13 crc kubenswrapper[4793]: I0127 21:02:13.908698 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-ssh-key-openstack-edpm-ipam\") pod \"f120b137-90a3-45e8-946f-5d32e682696a\" (UID: \"f120b137-90a3-45e8-946f-5d32e682696a\") " Jan 27 21:02:13 crc kubenswrapper[4793]: I0127 21:02:13.914830 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f120b137-90a3-45e8-946f-5d32e682696a-kube-api-access-nrdk9" (OuterVolumeSpecName: "kube-api-access-nrdk9") pod "f120b137-90a3-45e8-946f-5d32e682696a" (UID: "f120b137-90a3-45e8-946f-5d32e682696a"). InnerVolumeSpecName "kube-api-access-nrdk9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:02:13 crc kubenswrapper[4793]: I0127 21:02:13.918807 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "f120b137-90a3-45e8-946f-5d32e682696a" (UID: "f120b137-90a3-45e8-946f-5d32e682696a"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:02:13 crc kubenswrapper[4793]: I0127 21:02:13.938366 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f120b137-90a3-45e8-946f-5d32e682696a-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "f120b137-90a3-45e8-946f-5d32e682696a" (UID: "f120b137-90a3-45e8-946f-5d32e682696a"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 21:02:13 crc kubenswrapper[4793]: I0127 21:02:13.944824 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-inventory" (OuterVolumeSpecName: "inventory") pod "f120b137-90a3-45e8-946f-5d32e682696a" (UID: "f120b137-90a3-45e8-946f-5d32e682696a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:02:13 crc kubenswrapper[4793]: I0127 21:02:13.946395 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "f120b137-90a3-45e8-946f-5d32e682696a" (UID: "f120b137-90a3-45e8-946f-5d32e682696a"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.013672 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.013983 4793 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/f120b137-90a3-45e8-946f-5d32e682696a-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.013998 4793 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.014013 4793 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f120b137-90a3-45e8-946f-5d32e682696a-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.014024 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrdk9\" (UniqueName: \"kubernetes.io/projected/f120b137-90a3-45e8-946f-5d32e682696a-kube-api-access-nrdk9\") on node \"crc\" DevicePath \"\"" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.302334 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" event={"ID":"f120b137-90a3-45e8-946f-5d32e682696a","Type":"ContainerDied","Data":"eaa1cb545010c8cf106ab8a37c86f9d2c69072c924937398753ae4b71364b37c"} Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.302378 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eaa1cb545010c8cf106ab8a37c86f9d2c69072c924937398753ae4b71364b37c" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.302439 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vbrjv" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.622853 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc"] Jan 27 21:02:14 crc kubenswrapper[4793]: E0127 21:02:14.623432 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42f58700-9ffd-4ba4-806d-13d345c8923c" containerName="keystone-cron" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.623452 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="42f58700-9ffd-4ba4-806d-13d345c8923c" containerName="keystone-cron" Jan 27 21:02:14 crc kubenswrapper[4793]: E0127 21:02:14.623481 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f120b137-90a3-45e8-946f-5d32e682696a" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.623489 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f120b137-90a3-45e8-946f-5d32e682696a" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.623772 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f120b137-90a3-45e8-946f-5d32e682696a" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.623789 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="42f58700-9ffd-4ba4-806d-13d345c8923c" containerName="keystone-cron" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.624489 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.628833 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.629017 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.629117 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.629158 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.629029 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.629236 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.651064 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc"] Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.825670 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pmd4\" (UniqueName: \"kubernetes.io/projected/124c5ea7-93cd-46ae-be46-fb00f74edaa4-kube-api-access-6pmd4\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: 
I0127 21:02:14.825799 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.825849 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.825890 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.826061 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.826345 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.929001 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pmd4\" (UniqueName: \"kubernetes.io/projected/124c5ea7-93cd-46ae-be46-fb00f74edaa4-kube-api-access-6pmd4\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.929092 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.929341 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.929396 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.929451 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.929474 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.934455 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.934852 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.935414 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.945320 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: 
\"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.945855 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:14 crc kubenswrapper[4793]: I0127 21:02:14.949488 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pmd4\" (UniqueName: \"kubernetes.io/projected/124c5ea7-93cd-46ae-be46-fb00f74edaa4-kube-api-access-6pmd4\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:15 crc kubenswrapper[4793]: I0127 21:02:15.246165 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:02:15 crc kubenswrapper[4793]: I0127 21:02:15.944538 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc"] Jan 27 21:02:16 crc kubenswrapper[4793]: I0127 21:02:16.320701 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" event={"ID":"124c5ea7-93cd-46ae-be46-fb00f74edaa4","Type":"ContainerStarted","Data":"b1d36f3229c99ec1191a54fb6e23a7a0495a233d49a62798e6f3003bc02b6f98"} Jan 27 21:02:16 crc kubenswrapper[4793]: I0127 21:02:16.871447 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:02:16 crc kubenswrapper[4793]: E0127 21:02:16.871736 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:02:17 crc kubenswrapper[4793]: I0127 21:02:17.331736 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" event={"ID":"124c5ea7-93cd-46ae-be46-fb00f74edaa4","Type":"ContainerStarted","Data":"55d78c44308b22ac26e1bbeafa8fa248e6f29c8c28f35ca055748e891567c475"} Jan 27 21:02:17 crc kubenswrapper[4793]: I0127 21:02:17.362747 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" podStartSLOduration=2.671877487 podStartE2EDuration="3.362725554s" podCreationTimestamp="2026-01-27 21:02:14 +0000 UTC" firstStartedPulling="2026-01-27 21:02:15.962027216 +0000 UTC m=+3561.352280372" lastFinishedPulling="2026-01-27 21:02:16.652875283 +0000 UTC m=+3562.043128439" observedRunningTime="2026-01-27 21:02:17.352635484 +0000 UTC m=+3562.742888650" watchObservedRunningTime="2026-01-27 21:02:17.362725554 +0000 UTC m=+3562.752978720" Jan 27 21:02:22 crc kubenswrapper[4793]: I0127 21:02:22.766947 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:02:22 crc kubenswrapper[4793]: I0127 21:02:22.767687 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:02:30 crc kubenswrapper[4793]: I0127 21:02:30.804147 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:02:30 crc kubenswrapper[4793]: E0127 21:02:30.804921 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:02:45 crc kubenswrapper[4793]: I0127 21:02:45.819464 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:02:45 crc kubenswrapper[4793]: E0127 21:02:45.820500 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:02:51 crc kubenswrapper[4793]: I0127 21:02:51.335953 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2895p"] Jan 27 21:02:51 crc kubenswrapper[4793]: I0127 21:02:51.338407 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2895p" Jan 27 21:02:51 crc kubenswrapper[4793]: I0127 21:02:51.358255 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2895p"] Jan 27 21:02:51 crc kubenswrapper[4793]: I0127 21:02:51.403642 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6391ef59-8828-4c4e-a583-a8ae37803ef5-catalog-content\") pod \"community-operators-2895p\" (UID: \"6391ef59-8828-4c4e-a583-a8ae37803ef5\") " pod="openshift-marketplace/community-operators-2895p" Jan 27 21:02:51 crc kubenswrapper[4793]: I0127 21:02:51.403850 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6391ef59-8828-4c4e-a583-a8ae37803ef5-utilities\") pod \"community-operators-2895p\" (UID: \"6391ef59-8828-4c4e-a583-a8ae37803ef5\") " pod="openshift-marketplace/community-operators-2895p" Jan 27 21:02:51 crc kubenswrapper[4793]: I0127 21:02:51.404055 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpgnq\" (UniqueName: \"kubernetes.io/projected/6391ef59-8828-4c4e-a583-a8ae37803ef5-kube-api-access-zpgnq\") pod \"community-operators-2895p\" (UID: \"6391ef59-8828-4c4e-a583-a8ae37803ef5\") " pod="openshift-marketplace/community-operators-2895p" Jan 27 21:02:51 crc kubenswrapper[4793]: I0127 21:02:51.560746 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpgnq\" (UniqueName: \"kubernetes.io/projected/6391ef59-8828-4c4e-a583-a8ae37803ef5-kube-api-access-zpgnq\") pod \"community-operators-2895p\" (UID: \"6391ef59-8828-4c4e-a583-a8ae37803ef5\") " pod="openshift-marketplace/community-operators-2895p" Jan 27 21:02:51 crc kubenswrapper[4793]: I0127 21:02:51.561092 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6391ef59-8828-4c4e-a583-a8ae37803ef5-catalog-content\") pod \"community-operators-2895p\" (UID: \"6391ef59-8828-4c4e-a583-a8ae37803ef5\") " pod="openshift-marketplace/community-operators-2895p" Jan 27 21:02:51 crc kubenswrapper[4793]: I0127 21:02:51.561201 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6391ef59-8828-4c4e-a583-a8ae37803ef5-utilities\") pod \"community-operators-2895p\" (UID: \"6391ef59-8828-4c4e-a583-a8ae37803ef5\") " pod="openshift-marketplace/community-operators-2895p" Jan 27 21:02:51 crc kubenswrapper[4793]: I0127 21:02:51.561865 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6391ef59-8828-4c4e-a583-a8ae37803ef5-utilities\") pod \"community-operators-2895p\" (UID: \"6391ef59-8828-4c4e-a583-a8ae37803ef5\") " pod="openshift-marketplace/community-operators-2895p" Jan 27 21:02:51 crc kubenswrapper[4793]: I0127 21:02:51.561933 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6391ef59-8828-4c4e-a583-a8ae37803ef5-catalog-content\") pod \"community-operators-2895p\" (UID: \"6391ef59-8828-4c4e-a583-a8ae37803ef5\") " pod="openshift-marketplace/community-operators-2895p" Jan 27 21:02:51 crc kubenswrapper[4793]: I0127 21:02:51.604562 4793 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zpgnq\" (UniqueName: \"kubernetes.io/projected/6391ef59-8828-4c4e-a583-a8ae37803ef5-kube-api-access-zpgnq\") pod \"community-operators-2895p\" (UID: \"6391ef59-8828-4c4e-a583-a8ae37803ef5\") " pod="openshift-marketplace/community-operators-2895p" Jan 27 21:02:51 crc kubenswrapper[4793]: I0127 21:02:51.670871 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2895p" Jan 27 21:02:52 crc kubenswrapper[4793]: I0127 21:02:52.252782 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2895p"] Jan 27 21:02:52 crc kubenswrapper[4793]: I0127 21:02:52.684777 4793 generic.go:334] "Generic (PLEG): container finished" podID="6391ef59-8828-4c4e-a583-a8ae37803ef5" containerID="cf9c4e4ce38a6a26bbc758b6aa4af268ce9b70c31f9651b16ce59c09700a6925" exitCode=0 Jan 27 21:02:52 crc kubenswrapper[4793]: I0127 21:02:52.685012 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2895p" event={"ID":"6391ef59-8828-4c4e-a583-a8ae37803ef5","Type":"ContainerDied","Data":"cf9c4e4ce38a6a26bbc758b6aa4af268ce9b70c31f9651b16ce59c09700a6925"} Jan 27 21:02:52 crc kubenswrapper[4793]: I0127 21:02:52.685084 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2895p" event={"ID":"6391ef59-8828-4c4e-a583-a8ae37803ef5","Type":"ContainerStarted","Data":"7961f305ae15d9c99749e5491f1607c878ec2fbfe156947daf29a2d6968c27a4"} Jan 27 21:02:52 crc kubenswrapper[4793]: I0127 21:02:52.753770 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:02:52 crc kubenswrapper[4793]: I0127 21:02:52.753826 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:02:54 crc kubenswrapper[4793]: I0127 21:02:54.708347 4793 generic.go:334] "Generic (PLEG): container finished" podID="6391ef59-8828-4c4e-a583-a8ae37803ef5" containerID="463207b1a037f16c09fc2fa3a6f513050d24f1742b5c4afd5c415aa3bb55303c" exitCode=0 Jan 27 21:02:54 crc kubenswrapper[4793]: I0127 21:02:54.708389 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2895p" event={"ID":"6391ef59-8828-4c4e-a583-a8ae37803ef5","Type":"ContainerDied","Data":"463207b1a037f16c09fc2fa3a6f513050d24f1742b5c4afd5c415aa3bb55303c"} Jan 27 21:02:55 crc kubenswrapper[4793]: I0127 21:02:55.720276 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2895p" event={"ID":"6391ef59-8828-4c4e-a583-a8ae37803ef5","Type":"ContainerStarted","Data":"8947321c88c06741a8c6ac47acb3b8c1ef5e5487edba0715b81ced197ef69ae6"} Jan 27 21:02:55 crc kubenswrapper[4793]: I0127 21:02:55.754249 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2895p" podStartSLOduration=2.121140024 podStartE2EDuration="4.754227749s" podCreationTimestamp="2026-01-27 21:02:51 +0000 UTC" 
firstStartedPulling="2026-01-27 21:02:52.688535757 +0000 UTC m=+3598.078788913" lastFinishedPulling="2026-01-27 21:02:55.321623482 +0000 UTC m=+3600.711876638" observedRunningTime="2026-01-27 21:02:55.74545075 +0000 UTC m=+3601.135703906" watchObservedRunningTime="2026-01-27 21:02:55.754227749 +0000 UTC m=+3601.144480905" Jan 27 21:02:57 crc kubenswrapper[4793]: I0127 21:02:57.803430 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:02:57 crc kubenswrapper[4793]: E0127 21:02:57.804233 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:03:01 crc kubenswrapper[4793]: I0127 21:03:01.671266 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2895p" Jan 27 21:03:01 crc kubenswrapper[4793]: I0127 21:03:01.671897 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2895p" Jan 27 21:03:01 crc kubenswrapper[4793]: I0127 21:03:01.726920 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2895p" Jan 27 21:03:01 crc kubenswrapper[4793]: I0127 21:03:01.824778 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2895p" Jan 27 21:03:01 crc kubenswrapper[4793]: I0127 21:03:01.962529 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2895p"] Jan 27 21:03:03 crc kubenswrapper[4793]: I0127 21:03:03.784911 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2895p" podUID="6391ef59-8828-4c4e-a583-a8ae37803ef5" containerName="registry-server" containerID="cri-o://8947321c88c06741a8c6ac47acb3b8c1ef5e5487edba0715b81ced197ef69ae6" gracePeriod=2 Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.249795 4793 util.go:48] "No ready sandbox for pod can be found. 
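The pod_startup_latency_tracker record above encodes a simple relationship: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that E2E figure minus the time spent pulling images (lastFinishedPulling minus firstStartedPulling, read off the monotonic m=+ offsets). The log's own numbers check out, as this small Go verification shows:

package main

import "fmt"

func main() {
	// Monotonic m=+ offsets copied from the tracker record above (seconds).
	firstStartedPulling := 3598.078788913
	lastFinishedPulling := 3600.711876638
	// Wall clock: watchObservedRunningTime 21:02:55.754227749 - creation 21:02:51.
	e2e := 4.754227749

	pull := lastFinishedPulling - firstStartedPulling
	slo := e2e - pull
	fmt.Printf("pull=%.9fs slo=%.9fs\n", pull, slo)
	// pull=2.633087725s, slo=2.121140024s: matches podStartSLOduration
	// above (modulo float rounding), i.e. SLO duration excludes image pulls.
}
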
Need to start a new one" pod="openshift-marketplace/community-operators-2895p" Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.367972 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6391ef59-8828-4c4e-a583-a8ae37803ef5-utilities\") pod \"6391ef59-8828-4c4e-a583-a8ae37803ef5\" (UID: \"6391ef59-8828-4c4e-a583-a8ae37803ef5\") " Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.368054 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zpgnq\" (UniqueName: \"kubernetes.io/projected/6391ef59-8828-4c4e-a583-a8ae37803ef5-kube-api-access-zpgnq\") pod \"6391ef59-8828-4c4e-a583-a8ae37803ef5\" (UID: \"6391ef59-8828-4c4e-a583-a8ae37803ef5\") " Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.368102 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6391ef59-8828-4c4e-a583-a8ae37803ef5-catalog-content\") pod \"6391ef59-8828-4c4e-a583-a8ae37803ef5\" (UID: \"6391ef59-8828-4c4e-a583-a8ae37803ef5\") " Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.368666 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6391ef59-8828-4c4e-a583-a8ae37803ef5-utilities" (OuterVolumeSpecName: "utilities") pod "6391ef59-8828-4c4e-a583-a8ae37803ef5" (UID: "6391ef59-8828-4c4e-a583-a8ae37803ef5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.374908 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6391ef59-8828-4c4e-a583-a8ae37803ef5-kube-api-access-zpgnq" (OuterVolumeSpecName: "kube-api-access-zpgnq") pod "6391ef59-8828-4c4e-a583-a8ae37803ef5" (UID: "6391ef59-8828-4c4e-a583-a8ae37803ef5"). InnerVolumeSpecName "kube-api-access-zpgnq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.445879 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6391ef59-8828-4c4e-a583-a8ae37803ef5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6391ef59-8828-4c4e-a583-a8ae37803ef5" (UID: "6391ef59-8828-4c4e-a583-a8ae37803ef5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.470821 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6391ef59-8828-4c4e-a583-a8ae37803ef5-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.470858 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zpgnq\" (UniqueName: \"kubernetes.io/projected/6391ef59-8828-4c4e-a583-a8ae37803ef5-kube-api-access-zpgnq\") on node \"crc\" DevicePath \"\"" Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.470868 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6391ef59-8828-4c4e-a583-a8ae37803ef5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.797245 4793 generic.go:334] "Generic (PLEG): container finished" podID="6391ef59-8828-4c4e-a583-a8ae37803ef5" containerID="8947321c88c06741a8c6ac47acb3b8c1ef5e5487edba0715b81ced197ef69ae6" exitCode=0 Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.797293 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2895p" event={"ID":"6391ef59-8828-4c4e-a583-a8ae37803ef5","Type":"ContainerDied","Data":"8947321c88c06741a8c6ac47acb3b8c1ef5e5487edba0715b81ced197ef69ae6"} Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.797324 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2895p" event={"ID":"6391ef59-8828-4c4e-a583-a8ae37803ef5","Type":"ContainerDied","Data":"7961f305ae15d9c99749e5491f1607c878ec2fbfe156947daf29a2d6968c27a4"} Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.797340 4793 scope.go:117] "RemoveContainer" containerID="8947321c88c06741a8c6ac47acb3b8c1ef5e5487edba0715b81ced197ef69ae6" Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.797356 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2895p" Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.825794 4793 scope.go:117] "RemoveContainer" containerID="463207b1a037f16c09fc2fa3a6f513050d24f1742b5c4afd5c415aa3bb55303c" Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.840383 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2895p"] Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.849724 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2895p"] Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.854196 4793 scope.go:117] "RemoveContainer" containerID="cf9c4e4ce38a6a26bbc758b6aa4af268ce9b70c31f9651b16ce59c09700a6925" Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.894723 4793 scope.go:117] "RemoveContainer" containerID="8947321c88c06741a8c6ac47acb3b8c1ef5e5487edba0715b81ced197ef69ae6" Jan 27 21:03:04 crc kubenswrapper[4793]: E0127 21:03:04.895350 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8947321c88c06741a8c6ac47acb3b8c1ef5e5487edba0715b81ced197ef69ae6\": container with ID starting with 8947321c88c06741a8c6ac47acb3b8c1ef5e5487edba0715b81ced197ef69ae6 not found: ID does not exist" containerID="8947321c88c06741a8c6ac47acb3b8c1ef5e5487edba0715b81ced197ef69ae6" Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.895412 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8947321c88c06741a8c6ac47acb3b8c1ef5e5487edba0715b81ced197ef69ae6"} err="failed to get container status \"8947321c88c06741a8c6ac47acb3b8c1ef5e5487edba0715b81ced197ef69ae6\": rpc error: code = NotFound desc = could not find container \"8947321c88c06741a8c6ac47acb3b8c1ef5e5487edba0715b81ced197ef69ae6\": container with ID starting with 8947321c88c06741a8c6ac47acb3b8c1ef5e5487edba0715b81ced197ef69ae6 not found: ID does not exist" Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.895449 4793 scope.go:117] "RemoveContainer" containerID="463207b1a037f16c09fc2fa3a6f513050d24f1742b5c4afd5c415aa3bb55303c" Jan 27 21:03:04 crc kubenswrapper[4793]: E0127 21:03:04.895877 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"463207b1a037f16c09fc2fa3a6f513050d24f1742b5c4afd5c415aa3bb55303c\": container with ID starting with 463207b1a037f16c09fc2fa3a6f513050d24f1742b5c4afd5c415aa3bb55303c not found: ID does not exist" containerID="463207b1a037f16c09fc2fa3a6f513050d24f1742b5c4afd5c415aa3bb55303c" Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.895910 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"463207b1a037f16c09fc2fa3a6f513050d24f1742b5c4afd5c415aa3bb55303c"} err="failed to get container status \"463207b1a037f16c09fc2fa3a6f513050d24f1742b5c4afd5c415aa3bb55303c\": rpc error: code = NotFound desc = could not find container \"463207b1a037f16c09fc2fa3a6f513050d24f1742b5c4afd5c415aa3bb55303c\": container with ID starting with 463207b1a037f16c09fc2fa3a6f513050d24f1742b5c4afd5c415aa3bb55303c not found: ID does not exist" Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.895930 4793 scope.go:117] "RemoveContainer" containerID="cf9c4e4ce38a6a26bbc758b6aa4af268ce9b70c31f9651b16ce59c09700a6925" Jan 27 21:03:04 crc kubenswrapper[4793]: E0127 21:03:04.896266 4793 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"cf9c4e4ce38a6a26bbc758b6aa4af268ce9b70c31f9651b16ce59c09700a6925\": container with ID starting with cf9c4e4ce38a6a26bbc758b6aa4af268ce9b70c31f9651b16ce59c09700a6925 not found: ID does not exist" containerID="cf9c4e4ce38a6a26bbc758b6aa4af268ce9b70c31f9651b16ce59c09700a6925" Jan 27 21:03:04 crc kubenswrapper[4793]: I0127 21:03:04.896298 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf9c4e4ce38a6a26bbc758b6aa4af268ce9b70c31f9651b16ce59c09700a6925"} err="failed to get container status \"cf9c4e4ce38a6a26bbc758b6aa4af268ce9b70c31f9651b16ce59c09700a6925\": rpc error: code = NotFound desc = could not find container \"cf9c4e4ce38a6a26bbc758b6aa4af268ce9b70c31f9651b16ce59c09700a6925\": container with ID starting with cf9c4e4ce38a6a26bbc758b6aa4af268ce9b70c31f9651b16ce59c09700a6925 not found: ID does not exist" Jan 27 21:03:05 crc kubenswrapper[4793]: I0127 21:03:05.816023 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6391ef59-8828-4c4e-a583-a8ae37803ef5" path="/var/lib/kubelet/pods/6391ef59-8828-4c4e-a583-a8ae37803ef5/volumes" Jan 27 21:03:11 crc kubenswrapper[4793]: I0127 21:03:11.804635 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:03:11 crc kubenswrapper[4793]: E0127 21:03:11.806521 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:03:13 crc kubenswrapper[4793]: I0127 21:03:13.898058 4793 generic.go:334] "Generic (PLEG): container finished" podID="124c5ea7-93cd-46ae-be46-fb00f74edaa4" containerID="55d78c44308b22ac26e1bbeafa8fa248e6f29c8c28f35ca055748e891567c475" exitCode=0 Jan 27 21:03:13 crc kubenswrapper[4793]: I0127 21:03:13.898148 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" event={"ID":"124c5ea7-93cd-46ae-be46-fb00f74edaa4","Type":"ContainerDied","Data":"55d78c44308b22ac26e1bbeafa8fa248e6f29c8c28f35ca055748e891567c475"} Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.331949 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.425708 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-nova-metadata-neutron-config-0\") pod \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.425774 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-inventory\") pod \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.425818 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-ssh-key-openstack-edpm-ipam\") pod \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.425849 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-neutron-ovn-metadata-agent-neutron-config-0\") pod \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.425942 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-neutron-metadata-combined-ca-bundle\") pod \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.426033 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6pmd4\" (UniqueName: \"kubernetes.io/projected/124c5ea7-93cd-46ae-be46-fb00f74edaa4-kube-api-access-6pmd4\") pod \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\" (UID: \"124c5ea7-93cd-46ae-be46-fb00f74edaa4\") " Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.431740 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "124c5ea7-93cd-46ae-be46-fb00f74edaa4" (UID: "124c5ea7-93cd-46ae-be46-fb00f74edaa4"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.432413 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/124c5ea7-93cd-46ae-be46-fb00f74edaa4-kube-api-access-6pmd4" (OuterVolumeSpecName: "kube-api-access-6pmd4") pod "124c5ea7-93cd-46ae-be46-fb00f74edaa4" (UID: "124c5ea7-93cd-46ae-be46-fb00f74edaa4"). InnerVolumeSpecName "kube-api-access-6pmd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.459765 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "124c5ea7-93cd-46ae-be46-fb00f74edaa4" (UID: "124c5ea7-93cd-46ae-be46-fb00f74edaa4"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.460461 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "124c5ea7-93cd-46ae-be46-fb00f74edaa4" (UID: "124c5ea7-93cd-46ae-be46-fb00f74edaa4"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.463019 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-inventory" (OuterVolumeSpecName: "inventory") pod "124c5ea7-93cd-46ae-be46-fb00f74edaa4" (UID: "124c5ea7-93cd-46ae-be46-fb00f74edaa4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.466422 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "124c5ea7-93cd-46ae-be46-fb00f74edaa4" (UID: "124c5ea7-93cd-46ae-be46-fb00f74edaa4"). InnerVolumeSpecName "nova-metadata-neutron-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.528991 4793 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.529031 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6pmd4\" (UniqueName: \"kubernetes.io/projected/124c5ea7-93cd-46ae-be46-fb00f74edaa4-kube-api-access-6pmd4\") on node \"crc\" DevicePath \"\"" Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.529044 4793 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.529054 4793 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.529063 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.529073 4793 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/124c5ea7-93cd-46ae-be46-fb00f74edaa4-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.920800 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" event={"ID":"124c5ea7-93cd-46ae-be46-fb00f74edaa4","Type":"ContainerDied","Data":"b1d36f3229c99ec1191a54fb6e23a7a0495a233d49a62798e6f3003bc02b6f98"} Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.920842 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1d36f3229c99ec1191a54fb6e23a7a0495a233d49a62798e6f3003bc02b6f98" Jan 27 21:03:15 crc kubenswrapper[4793]: I0127 21:03:15.920938 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.081256 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h"] Jan 27 21:03:16 crc kubenswrapper[4793]: E0127 21:03:16.082074 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6391ef59-8828-4c4e-a583-a8ae37803ef5" containerName="extract-utilities" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.082098 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6391ef59-8828-4c4e-a583-a8ae37803ef5" containerName="extract-utilities" Jan 27 21:03:16 crc kubenswrapper[4793]: E0127 21:03:16.082118 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6391ef59-8828-4c4e-a583-a8ae37803ef5" containerName="registry-server" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.082126 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6391ef59-8828-4c4e-a583-a8ae37803ef5" containerName="registry-server" Jan 27 21:03:16 crc kubenswrapper[4793]: E0127 21:03:16.082173 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="124c5ea7-93cd-46ae-be46-fb00f74edaa4" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.082183 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="124c5ea7-93cd-46ae-be46-fb00f74edaa4" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 27 21:03:16 crc kubenswrapper[4793]: E0127 21:03:16.082201 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6391ef59-8828-4c4e-a583-a8ae37803ef5" containerName="extract-content" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.082209 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6391ef59-8828-4c4e-a583-a8ae37803ef5" containerName="extract-content" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.082445 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="124c5ea7-93cd-46ae-be46-fb00f74edaa4" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.082481 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="6391ef59-8828-4c4e-a583-a8ae37803ef5" containerName="registry-server" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.083409 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.085427 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.086395 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.086604 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.087475 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.087604 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.096874 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h"] Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.270100 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.270233 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b54dx\" (UniqueName: \"kubernetes.io/projected/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-kube-api-access-b54dx\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.270282 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.270356 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.270418 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.372003 4793 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.372083 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.372208 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b54dx\" (UniqueName: \"kubernetes.io/projected/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-kube-api-access-b54dx\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.372298 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.372501 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.378732 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.378991 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.379056 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.380190 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" 
(UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.394954 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b54dx\" (UniqueName: \"kubernetes.io/projected/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-kube-api-access-b54dx\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.403964 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:03:16 crc kubenswrapper[4793]: I0127 21:03:16.928231 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h"] Jan 27 21:03:16 crc kubenswrapper[4793]: W0127 21:03:16.932397 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb3a616d9_776d_49a1_88d7_3292fdbdb7b6.slice/crio-cf1f4483e35a96b09de2309b1a9addf37954c0be77afa2c1cbf761a0c481fb80 WatchSource:0}: Error finding container cf1f4483e35a96b09de2309b1a9addf37954c0be77afa2c1cbf761a0c481fb80: Status 404 returned error can't find the container with id cf1f4483e35a96b09de2309b1a9addf37954c0be77afa2c1cbf761a0c481fb80 Jan 27 21:03:17 crc kubenswrapper[4793]: I0127 21:03:17.940529 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" event={"ID":"b3a616d9-776d-49a1-88d7-3292fdbdb7b6","Type":"ContainerStarted","Data":"6f3b1be4b132b2a13125bdf670901ec1a7a885aa02d0e635233fb2e6411a9ba0"} Jan 27 21:03:17 crc kubenswrapper[4793]: I0127 21:03:17.940888 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" event={"ID":"b3a616d9-776d-49a1-88d7-3292fdbdb7b6","Type":"ContainerStarted","Data":"cf1f4483e35a96b09de2309b1a9addf37954c0be77afa2c1cbf761a0c481fb80"} Jan 27 21:03:17 crc kubenswrapper[4793]: I0127 21:03:17.961448 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" podStartSLOduration=1.255743791 podStartE2EDuration="1.961423943s" podCreationTimestamp="2026-01-27 21:03:16 +0000 UTC" firstStartedPulling="2026-01-27 21:03:16.9359967 +0000 UTC m=+3622.326249846" lastFinishedPulling="2026-01-27 21:03:17.641676842 +0000 UTC m=+3623.031929998" observedRunningTime="2026-01-27 21:03:17.957461548 +0000 UTC m=+3623.347714714" watchObservedRunningTime="2026-01-27 21:03:17.961423943 +0000 UTC m=+3623.351677099" Jan 27 21:03:22 crc kubenswrapper[4793]: I0127 21:03:22.753127 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:03:22 crc kubenswrapper[4793]: I0127 21:03:22.753682 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:03:22 crc kubenswrapper[4793]: I0127 21:03:22.753746 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 21:03:22 crc kubenswrapper[4793]: I0127 21:03:22.754782 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6841d7e64366c85df7803132fe2e4a8b62672d59f2d2cdd9264dd9d9a7f16f86"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 21:03:22 crc kubenswrapper[4793]: I0127 21:03:22.754856 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://6841d7e64366c85df7803132fe2e4a8b62672d59f2d2cdd9264dd9d9a7f16f86" gracePeriod=600 Jan 27 21:03:22 crc kubenswrapper[4793]: I0127 21:03:22.804134 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:03:22 crc kubenswrapper[4793]: E0127 21:03:22.804661 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:03:22 crc kubenswrapper[4793]: I0127 21:03:22.987389 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="6841d7e64366c85df7803132fe2e4a8b62672d59f2d2cdd9264dd9d9a7f16f86" exitCode=0 Jan 27 21:03:22 crc kubenswrapper[4793]: I0127 21:03:22.987451 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"6841d7e64366c85df7803132fe2e4a8b62672d59f2d2cdd9264dd9d9a7f16f86"} Jan 27 21:03:22 crc kubenswrapper[4793]: I0127 21:03:22.987776 4793 scope.go:117] "RemoveContainer" containerID="d3e28294308edfa79d15e9eeb6563088833ff6af0a91b5fd0ae35dcbbb76ab76" Jan 27 21:03:23 crc kubenswrapper[4793]: I0127 21:03:23.999565 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245"} Jan 27 21:03:35 crc kubenswrapper[4793]: I0127 21:03:35.808819 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:03:35 crc kubenswrapper[4793]: E0127 21:03:35.809706 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:03:48 crc kubenswrapper[4793]: I0127 21:03:48.804206 4793 
scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:03:49 crc kubenswrapper[4793]: I0127 21:03:49.249340 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114"} Jan 27 21:03:52 crc kubenswrapper[4793]: I0127 21:03:52.278050 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" exitCode=1 Jan 27 21:03:52 crc kubenswrapper[4793]: I0127 21:03:52.278121 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114"} Jan 27 21:03:52 crc kubenswrapper[4793]: I0127 21:03:52.278426 4793 scope.go:117] "RemoveContainer" containerID="27d2bea60b71e0ec5110c87f8cabb8018a4f811b7f704327ee142575dd112292" Jan 27 21:03:52 crc kubenswrapper[4793]: I0127 21:03:52.279094 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:03:52 crc kubenswrapper[4793]: E0127 21:03:52.279426 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:03:53 crc kubenswrapper[4793]: I0127 21:03:53.243166 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 21:03:53 crc kubenswrapper[4793]: I0127 21:03:53.291112 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:03:53 crc kubenswrapper[4793]: E0127 21:03:53.291420 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:03:58 crc kubenswrapper[4793]: I0127 21:03:58.243376 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:03:58 crc kubenswrapper[4793]: I0127 21:03:58.244122 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:03:58 crc kubenswrapper[4793]: I0127 21:03:58.244141 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:03:58 crc kubenswrapper[4793]: I0127 21:03:58.245308 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:03:58 crc kubenswrapper[4793]: E0127 21:03:58.245883 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" 
pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:04:10 crc kubenswrapper[4793]: I0127 21:04:10.803208 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:04:10 crc kubenswrapper[4793]: E0127 21:04:10.803851 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:04:22 crc kubenswrapper[4793]: I0127 21:04:22.804270 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:04:22 crc kubenswrapper[4793]: E0127 21:04:22.805239 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:04:37 crc kubenswrapper[4793]: I0127 21:04:37.804335 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:04:37 crc kubenswrapper[4793]: E0127 21:04:37.805104 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:04:52 crc kubenswrapper[4793]: I0127 21:04:52.803749 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:04:52 crc kubenswrapper[4793]: E0127 21:04:52.804480 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:05:07 crc kubenswrapper[4793]: I0127 21:05:07.803664 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:05:07 crc kubenswrapper[4793]: E0127 21:05:07.804418 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:05:20 crc kubenswrapper[4793]: I0127 21:05:20.804297 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:05:20 crc kubenswrapper[4793]: E0127 21:05:20.805795 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" 
pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:05:35 crc kubenswrapper[4793]: I0127 21:05:35.813922 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:05:35 crc kubenswrapper[4793]: E0127 21:05:35.816964 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:05:49 crc kubenswrapper[4793]: I0127 21:05:49.803837 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:05:49 crc kubenswrapper[4793]: E0127 21:05:49.804841 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:05:52 crc kubenswrapper[4793]: I0127 21:05:52.753920 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:05:52 crc kubenswrapper[4793]: I0127 21:05:52.754243 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:06:04 crc kubenswrapper[4793]: I0127 21:06:04.803803 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:06:04 crc kubenswrapper[4793]: E0127 21:06:04.805587 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:06:19 crc kubenswrapper[4793]: I0127 21:06:19.803804 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:06:19 crc kubenswrapper[4793]: E0127 21:06:19.804700 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:06:22 crc kubenswrapper[4793]: I0127 21:06:22.753849 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 
27 21:06:22 crc kubenswrapper[4793]: I0127 21:06:22.754231 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:06:34 crc kubenswrapper[4793]: I0127 21:06:34.805056 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:06:34 crc kubenswrapper[4793]: E0127 21:06:34.807075 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:06:47 crc kubenswrapper[4793]: I0127 21:06:47.803841 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:06:47 crc kubenswrapper[4793]: E0127 21:06:47.804682 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:06:52 crc kubenswrapper[4793]: I0127 21:06:52.753861 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:06:52 crc kubenswrapper[4793]: I0127 21:06:52.754477 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:06:52 crc kubenswrapper[4793]: I0127 21:06:52.754539 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 21:06:52 crc kubenswrapper[4793]: I0127 21:06:52.755536 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 21:06:52 crc kubenswrapper[4793]: I0127 21:06:52.755636 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245" gracePeriod=600 Jan 27 21:06:52 crc kubenswrapper[4793]: E0127 21:06:52.899670 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:06:53 crc kubenswrapper[4793]: I0127 21:06:53.141698 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245" exitCode=0 Jan 27 21:06:53 crc kubenswrapper[4793]: I0127 21:06:53.141744 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245"} Jan 27 21:06:53 crc kubenswrapper[4793]: I0127 21:06:53.141775 4793 scope.go:117] "RemoveContainer" containerID="6841d7e64366c85df7803132fe2e4a8b62672d59f2d2cdd9264dd9d9a7f16f86" Jan 27 21:06:53 crc kubenswrapper[4793]: I0127 21:06:53.142490 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245" Jan 27 21:06:53 crc kubenswrapper[4793]: E0127 21:06:53.142777 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:06:58 crc kubenswrapper[4793]: I0127 21:06:58.804823 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:06:58 crc kubenswrapper[4793]: E0127 21:06:58.806048 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:07:06 crc kubenswrapper[4793]: I0127 21:07:06.854526 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245" Jan 27 21:07:06 crc kubenswrapper[4793]: E0127 21:07:06.855496 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:07:12 crc kubenswrapper[4793]: I0127 21:07:12.804178 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:07:12 crc kubenswrapper[4793]: E0127 21:07:12.804997 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" 
pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:07:18 crc kubenswrapper[4793]: I0127 21:07:18.803501 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245" Jan 27 21:07:18 crc kubenswrapper[4793]: E0127 21:07:18.804318 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:07:25 crc kubenswrapper[4793]: I0127 21:07:25.835270 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:07:25 crc kubenswrapper[4793]: E0127 21:07:25.836058 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:07:29 crc kubenswrapper[4793]: I0127 21:07:29.803735 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245" Jan 27 21:07:29 crc kubenswrapper[4793]: E0127 21:07:29.804714 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:07:37 crc kubenswrapper[4793]: I0127 21:07:37.804083 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:07:37 crc kubenswrapper[4793]: E0127 21:07:37.804876 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:07:40 crc kubenswrapper[4793]: I0127 21:07:40.803693 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245" Jan 27 21:07:40 crc kubenswrapper[4793]: E0127 21:07:40.805102 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:07:48 crc kubenswrapper[4793]: I0127 21:07:48.803504 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:07:48 crc kubenswrapper[4793]: E0127 21:07:48.804404 4793 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:07:53 crc kubenswrapper[4793]: I0127 21:07:53.803402 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245" Jan 27 21:07:53 crc kubenswrapper[4793]: E0127 21:07:53.804243 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:07:56 crc kubenswrapper[4793]: I0127 21:07:56.619742 4793 generic.go:334] "Generic (PLEG): container finished" podID="b3a616d9-776d-49a1-88d7-3292fdbdb7b6" containerID="6f3b1be4b132b2a13125bdf670901ec1a7a885aa02d0e635233fb2e6411a9ba0" exitCode=0 Jan 27 21:07:56 crc kubenswrapper[4793]: I0127 21:07:56.619786 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" event={"ID":"b3a616d9-776d-49a1-88d7-3292fdbdb7b6","Type":"ContainerDied","Data":"6f3b1be4b132b2a13125bdf670901ec1a7a885aa02d0e635233fb2e6411a9ba0"} Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.136984 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.290945 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-libvirt-combined-ca-bundle\") pod \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.290991 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-libvirt-secret-0\") pod \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.291017 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b54dx\" (UniqueName: \"kubernetes.io/projected/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-kube-api-access-b54dx\") pod \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.291178 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-inventory\") pod \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\" (UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.291240 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-ssh-key-openstack-edpm-ipam\") pod \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\" 
(UID: \"b3a616d9-776d-49a1-88d7-3292fdbdb7b6\") " Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.297667 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-kube-api-access-b54dx" (OuterVolumeSpecName: "kube-api-access-b54dx") pod "b3a616d9-776d-49a1-88d7-3292fdbdb7b6" (UID: "b3a616d9-776d-49a1-88d7-3292fdbdb7b6"). InnerVolumeSpecName "kube-api-access-b54dx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.298131 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "b3a616d9-776d-49a1-88d7-3292fdbdb7b6" (UID: "b3a616d9-776d-49a1-88d7-3292fdbdb7b6"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.325460 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-inventory" (OuterVolumeSpecName: "inventory") pod "b3a616d9-776d-49a1-88d7-3292fdbdb7b6" (UID: "b3a616d9-776d-49a1-88d7-3292fdbdb7b6"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.327962 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "b3a616d9-776d-49a1-88d7-3292fdbdb7b6" (UID: "b3a616d9-776d-49a1-88d7-3292fdbdb7b6"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.338783 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "b3a616d9-776d-49a1-88d7-3292fdbdb7b6" (UID: "b3a616d9-776d-49a1-88d7-3292fdbdb7b6"). InnerVolumeSpecName "libvirt-secret-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.394880 4793 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.394920 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.394932 4793 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.394943 4793 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.394952 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b54dx\" (UniqueName: \"kubernetes.io/projected/b3a616d9-776d-49a1-88d7-3292fdbdb7b6-kube-api-access-b54dx\") on node \"crc\" DevicePath \"\"" Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.697851 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" event={"ID":"b3a616d9-776d-49a1-88d7-3292fdbdb7b6","Type":"ContainerDied","Data":"cf1f4483e35a96b09de2309b1a9addf37954c0be77afa2c1cbf761a0c481fb80"} Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.698189 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cf1f4483e35a96b09de2309b1a9addf37954c0be77afa2c1cbf761a0c481fb80" Jan 27 21:07:58 crc kubenswrapper[4793]: I0127 21:07:58.697930 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.062567 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2"] Jan 27 21:07:59 crc kubenswrapper[4793]: E0127 21:07:59.063090 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3a616d9-776d-49a1-88d7-3292fdbdb7b6" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.063105 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3a616d9-776d-49a1-88d7-3292fdbdb7b6" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.063344 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3a616d9-776d-49a1-88d7-3292fdbdb7b6" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.064213 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.085437 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.085905 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.086076 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.086211 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.086343 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.087005 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.087108 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.107420 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2"] Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.246332 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.247128 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.247188 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.247327 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxxzx\" (UniqueName: \"kubernetes.io/projected/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-kube-api-access-pxxzx\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.247437 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: 
\"kubernetes.io/configmap/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.247533 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.247646 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.247750 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.247806 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.349459 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.349513 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.349538 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.349611 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-pxxzx\" (UniqueName: \"kubernetes.io/projected/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-kube-api-access-pxxzx\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.349671 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.349703 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.349740 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.349797 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.349829 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.351750 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.355480 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.355682 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.355784 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.358630 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.358723 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.359378 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.359750 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.369864 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxxzx\" (UniqueName: \"kubernetes.io/projected/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-kube-api-access-pxxzx\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pb8b2\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.401533 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.971637 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2"] Jan 27 21:07:59 crc kubenswrapper[4793]: W0127 21:07:59.976492 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod187681ff_22a7_4ec2_97f0_94d51c9dc1ca.slice/crio-ebaeda40452d44fbcfce4527cd2d7c262c10831f9eae10a0bcdae6748a58ecde WatchSource:0}: Error finding container ebaeda40452d44fbcfce4527cd2d7c262c10831f9eae10a0bcdae6748a58ecde: Status 404 returned error can't find the container with id ebaeda40452d44fbcfce4527cd2d7c262c10831f9eae10a0bcdae6748a58ecde Jan 27 21:07:59 crc kubenswrapper[4793]: I0127 21:07:59.979103 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 21:08:00 crc kubenswrapper[4793]: I0127 21:08:00.718565 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" event={"ID":"187681ff-22a7-4ec2-97f0-94d51c9dc1ca","Type":"ContainerStarted","Data":"0c0c66d9bf25b839d3946489ce41793ad4d25c6c036795a2cfc29084503a9710"} Jan 27 21:08:00 crc kubenswrapper[4793]: I0127 21:08:00.719846 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" event={"ID":"187681ff-22a7-4ec2-97f0-94d51c9dc1ca","Type":"ContainerStarted","Data":"ebaeda40452d44fbcfce4527cd2d7c262c10831f9eae10a0bcdae6748a58ecde"} Jan 27 21:08:00 crc kubenswrapper[4793]: I0127 21:08:00.742573 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" podStartSLOduration=2.35999962 podStartE2EDuration="2.742530766s" podCreationTimestamp="2026-01-27 21:07:58 +0000 UTC" firstStartedPulling="2026-01-27 21:07:59.9788699 +0000 UTC m=+3905.369123056" lastFinishedPulling="2026-01-27 21:08:00.361401046 +0000 UTC m=+3905.751654202" observedRunningTime="2026-01-27 21:08:00.737290196 +0000 UTC m=+3906.127543362" watchObservedRunningTime="2026-01-27 21:08:00.742530766 +0000 UTC m=+3906.132783952" Jan 27 21:08:01 crc kubenswrapper[4793]: I0127 21:08:01.803103 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114" Jan 27 21:08:01 crc kubenswrapper[4793]: E0127 21:08:01.803718 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:08:08 crc kubenswrapper[4793]: I0127 21:08:08.803447 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245" Jan 27 21:08:08 crc kubenswrapper[4793]: E0127 21:08:08.804326 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 
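
Note: the pod_startup_latency_tracker entry above is internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (21:08:00.742530766 - 21:07:58 = 2.742530766s), and podStartSLOduration appears to exclude the image-pull window, since 2.742530766s - (21:08:00.361401046 - 21:07:59.9788699) = 2.742530766s - 0.382531146s = 2.35999962s, exactly the reported value.
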
Jan 27 21:08:14 crc kubenswrapper[4793]: I0127 21:08:14.803488 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114"
Jan 27 21:08:14 crc kubenswrapper[4793]: E0127 21:08:14.804242 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:08:22 crc kubenswrapper[4793]: I0127 21:08:22.803121 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245"
Jan 27 21:08:22 crc kubenswrapper[4793]: E0127 21:08:22.805070 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:08:25 crc kubenswrapper[4793]: I0127 21:08:25.811004 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114"
Jan 27 21:08:25 crc kubenswrapper[4793]: E0127 21:08:25.811818 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:08:35 crc kubenswrapper[4793]: I0127 21:08:35.810617 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245"
Jan 27 21:08:35 crc kubenswrapper[4793]: E0127 21:08:35.811768 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:08:40 crc kubenswrapper[4793]: I0127 21:08:40.803925 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114"
Jan 27 21:08:40 crc kubenswrapper[4793]: E0127 21:08:40.804815 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:08:49 crc kubenswrapper[4793]: I0127 21:08:49.803310 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245"
Jan 27 21:08:49 crc kubenswrapper[4793]: E0127 21:08:49.804615 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:08:53 crc kubenswrapper[4793]: I0127 21:08:53.803470 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114"
Jan 27 21:08:54 crc kubenswrapper[4793]: I0127 21:08:54.380129 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"}
Jan 27 21:08:57 crc kubenswrapper[4793]: I0127 21:08:57.413513 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55" exitCode=1
Jan 27 21:08:57 crc kubenswrapper[4793]: I0127 21:08:57.413712 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"}
Jan 27 21:08:57 crc kubenswrapper[4793]: I0127 21:08:57.414335 4793 scope.go:117] "RemoveContainer" containerID="e1d2fdf629f22480e437f3119ba461b850ec6c76dbc2f69e2305f0f5a36cb114"
Jan 27 21:08:57 crc kubenswrapper[4793]: I0127 21:08:57.415397 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:08:57 crc kubenswrapper[4793]: E0127 21:08:57.415990 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:08:58 crc kubenswrapper[4793]: I0127 21:08:58.242620 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 21:08:58 crc kubenswrapper[4793]: I0127 21:08:58.242841 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 21:08:58 crc kubenswrapper[4793]: I0127 21:08:58.242852 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 27 21:08:58 crc kubenswrapper[4793]: I0127 21:08:58.242865 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 21:08:58 crc kubenswrapper[4793]: I0127 21:08:58.433064 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:08:58 crc kubenswrapper[4793]: E0127 21:08:58.433414 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:08:59 crc kubenswrapper[4793]: I0127 21:08:59.456797 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:08:59 crc kubenswrapper[4793]: E0127 21:08:59.457367 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:09:02 crc kubenswrapper[4793]: I0127 21:09:02.804284 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245"
Jan 27 21:09:02 crc kubenswrapper[4793]: E0127 21:09:02.805149 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:09:13 crc kubenswrapper[4793]: I0127 21:09:13.804394 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:09:13 crc kubenswrapper[4793]: E0127 21:09:13.805109 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:09:15 crc kubenswrapper[4793]: I0127 21:09:15.809687 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245"
Jan 27 21:09:15 crc kubenswrapper[4793]: E0127 21:09:15.810201 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:09:26 crc kubenswrapper[4793]: I0127 21:09:26.804196 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:09:26 crc kubenswrapper[4793]: E0127 21:09:26.805027 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:09:28 crc kubenswrapper[4793]: I0127 21:09:28.803870 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245"
Jan 27 21:09:28 crc kubenswrapper[4793]: E0127 21:09:28.804446 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
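
Note: the fixed "back-off 5m0s" in the errors above, together with the single restart at 21:08:53 whose container exits with code 1 three seconds later, shows the restart backoff already saturated at its cap: each failed start doubles the delay until the cap is reached, and the counter only resets after the container runs cleanly for a while. A minimal sketch of that schedule, assuming the kubelet's conventional 10s initial delay and doubling factor (those constants are not recorded in this log):

package main

import (
	"fmt"
	"time"
)

// Sketch: CrashLoopBackOff delay growth. The 5m0s cap is quoted in the
// log messages above; the 10s base and the doubling are assumed defaults.
func main() {
	delay, maxDelay := 10*time.Second, 5*time.Minute
	for attempt := 1; attempt <= 7; attempt++ {
		fmt.Printf("restart attempt %d: back off %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
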
podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:09:40 crc kubenswrapper[4793]: I0127 21:09:40.803727 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245" Jan 27 21:09:40 crc kubenswrapper[4793]: E0127 21:09:40.804573 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:09:41 crc kubenswrapper[4793]: I0127 21:09:41.803841 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55" Jan 27 21:09:41 crc kubenswrapper[4793]: E0127 21:09:41.804711 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:09:53 crc kubenswrapper[4793]: I0127 21:09:53.808754 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55" Jan 27 21:09:53 crc kubenswrapper[4793]: E0127 21:09:53.809907 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:09:55 crc kubenswrapper[4793]: I0127 21:09:55.811955 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245" Jan 27 21:09:55 crc kubenswrapper[4793]: E0127 21:09:55.813013 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:10:05 crc kubenswrapper[4793]: I0127 21:10:05.819693 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55" Jan 27 21:10:05 crc kubenswrapper[4793]: E0127 21:10:05.820893 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:10:09 crc kubenswrapper[4793]: I0127 21:10:09.805231 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245" Jan 27 21:10:09 crc kubenswrapper[4793]: E0127 21:10:09.806666 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:10:19 crc kubenswrapper[4793]: I0127 21:10:19.856765 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55" Jan 27 21:10:19 crc kubenswrapper[4793]: E0127 21:10:19.857956 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:10:23 crc kubenswrapper[4793]: I0127 21:10:23.804388 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245" Jan 27 21:10:23 crc kubenswrapper[4793]: E0127 21:10:23.805364 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:10:33 crc kubenswrapper[4793]: I0127 21:10:33.804452 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55" Jan 27 21:10:33 crc kubenswrapper[4793]: E0127 21:10:33.805205 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:10:38 crc kubenswrapper[4793]: I0127 21:10:38.803567 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245" Jan 27 21:10:38 crc kubenswrapper[4793]: E0127 21:10:38.804443 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:10:45 crc kubenswrapper[4793]: I0127 21:10:45.820486 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55" Jan 27 21:10:45 crc kubenswrapper[4793]: E0127 21:10:45.821219 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:10:46 crc kubenswrapper[4793]: I0127 21:10:46.948155 4793 generic.go:334] "Generic (PLEG): container finished" 
podID="187681ff-22a7-4ec2-97f0-94d51c9dc1ca" containerID="0c0c66d9bf25b839d3946489ce41793ad4d25c6c036795a2cfc29084503a9710" exitCode=0 Jan 27 21:10:46 crc kubenswrapper[4793]: I0127 21:10:46.948246 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" event={"ID":"187681ff-22a7-4ec2-97f0-94d51c9dc1ca","Type":"ContainerDied","Data":"0c0c66d9bf25b839d3946489ce41793ad4d25c6c036795a2cfc29084503a9710"} Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.561961 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.722214 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-migration-ssh-key-1\") pod \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.722288 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-inventory\") pod \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.722327 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-ssh-key-openstack-edpm-ipam\") pod \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.722393 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-extra-config-0\") pod \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.722527 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxxzx\" (UniqueName: \"kubernetes.io/projected/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-kube-api-access-pxxzx\") pod \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.722736 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-combined-ca-bundle\") pod \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.722776 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-migration-ssh-key-0\") pod \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.722838 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-cell1-compute-config-0\") pod 
\"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.722871 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-cell1-compute-config-1\") pod \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\" (UID: \"187681ff-22a7-4ec2-97f0-94d51c9dc1ca\") " Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.744048 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-kube-api-access-pxxzx" (OuterVolumeSpecName: "kube-api-access-pxxzx") pod "187681ff-22a7-4ec2-97f0-94d51c9dc1ca" (UID: "187681ff-22a7-4ec2-97f0-94d51c9dc1ca"). InnerVolumeSpecName "kube-api-access-pxxzx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.745402 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "187681ff-22a7-4ec2-97f0-94d51c9dc1ca" (UID: "187681ff-22a7-4ec2-97f0-94d51c9dc1ca"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.749912 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "187681ff-22a7-4ec2-97f0-94d51c9dc1ca" (UID: "187681ff-22a7-4ec2-97f0-94d51c9dc1ca"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.757904 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "187681ff-22a7-4ec2-97f0-94d51c9dc1ca" (UID: "187681ff-22a7-4ec2-97f0-94d51c9dc1ca"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.758328 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-inventory" (OuterVolumeSpecName: "inventory") pod "187681ff-22a7-4ec2-97f0-94d51c9dc1ca" (UID: "187681ff-22a7-4ec2-97f0-94d51c9dc1ca"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.758988 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "187681ff-22a7-4ec2-97f0-94d51c9dc1ca" (UID: "187681ff-22a7-4ec2-97f0-94d51c9dc1ca"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.759633 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "187681ff-22a7-4ec2-97f0-94d51c9dc1ca" (UID: "187681ff-22a7-4ec2-97f0-94d51c9dc1ca"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.760850 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "187681ff-22a7-4ec2-97f0-94d51c9dc1ca" (UID: "187681ff-22a7-4ec2-97f0-94d51c9dc1ca"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.777801 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "187681ff-22a7-4ec2-97f0-94d51c9dc1ca" (UID: "187681ff-22a7-4ec2-97f0-94d51c9dc1ca"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.837469 4793 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.837749 4793 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.837790 4793 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.837805 4793 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.837823 4793 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.837836 4793 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.837849 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.837861 4793 reconciler_common.go:293] "Volume 
detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.837873 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxxzx\" (UniqueName: \"kubernetes.io/projected/187681ff-22a7-4ec2-97f0-94d51c9dc1ca-kube-api-access-pxxzx\") on node \"crc\" DevicePath \"\"" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.972189 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" event={"ID":"187681ff-22a7-4ec2-97f0-94d51c9dc1ca","Type":"ContainerDied","Data":"ebaeda40452d44fbcfce4527cd2d7c262c10831f9eae10a0bcdae6748a58ecde"} Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.972245 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pb8b2" Jan 27 21:10:48 crc kubenswrapper[4793]: I0127 21:10:48.972265 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ebaeda40452d44fbcfce4527cd2d7c262c10831f9eae10a0bcdae6748a58ecde" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.077372 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs"] Jan 27 21:10:49 crc kubenswrapper[4793]: E0127 21:10:49.077857 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="187681ff-22a7-4ec2-97f0-94d51c9dc1ca" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.077873 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="187681ff-22a7-4ec2-97f0-94d51c9dc1ca" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.078073 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="187681ff-22a7-4ec2-97f0-94d51c9dc1ca" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.078778 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.081243 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-bvxvd" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.081589 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.081622 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.084439 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.086452 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.097175 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs"] Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.143682 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpvw4\" (UniqueName: \"kubernetes.io/projected/9d45706e-d075-45fc-9d80-b21908572463-kube-api-access-hpvw4\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.143787 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.143820 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.143849 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.143874 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 
crc kubenswrapper[4793]: I0127 21:10:49.144016 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.144053 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.245791 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.245881 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.245926 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.245963 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.246062 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.246233 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-1\") pod 
\"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.247005 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpvw4\" (UniqueName: \"kubernetes.io/projected/9d45706e-d075-45fc-9d80-b21908572463-kube-api-access-hpvw4\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.250278 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.250348 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.251122 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.251514 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.251736 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.251966 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.268724 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpvw4\" (UniqueName: 
\"kubernetes.io/projected/9d45706e-d075-45fc-9d80-b21908572463-kube-api-access-hpvw4\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:49 crc kubenswrapper[4793]: I0127 21:10:49.397949 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" Jan 27 21:10:50 crc kubenswrapper[4793]: I0127 21:10:50.099214 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs"] Jan 27 21:10:50 crc kubenswrapper[4793]: W0127 21:10:50.104835 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d45706e_d075_45fc_9d80_b21908572463.slice/crio-200dc554deeb9366f37e96e3946ae71c664259a14359cc53b7439c1652b35ffd WatchSource:0}: Error finding container 200dc554deeb9366f37e96e3946ae71c664259a14359cc53b7439c1652b35ffd: Status 404 returned error can't find the container with id 200dc554deeb9366f37e96e3946ae71c664259a14359cc53b7439c1652b35ffd Jan 27 21:10:50 crc kubenswrapper[4793]: I0127 21:10:50.803808 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245" Jan 27 21:10:50 crc kubenswrapper[4793]: E0127 21:10:50.804139 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:10:51 crc kubenswrapper[4793]: I0127 21:10:51.012113 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" event={"ID":"9d45706e-d075-45fc-9d80-b21908572463","Type":"ContainerStarted","Data":"20dccf2399e99f1a4d460449ae667bc81c20be782da84ab54020c51b98374241"} Jan 27 21:10:51 crc kubenswrapper[4793]: I0127 21:10:51.012439 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" event={"ID":"9d45706e-d075-45fc-9d80-b21908572463","Type":"ContainerStarted","Data":"200dc554deeb9366f37e96e3946ae71c664259a14359cc53b7439c1652b35ffd"} Jan 27 21:10:51 crc kubenswrapper[4793]: I0127 21:10:51.036082 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" podStartSLOduration=1.59197134 podStartE2EDuration="2.036063965s" podCreationTimestamp="2026-01-27 21:10:49 +0000 UTC" firstStartedPulling="2026-01-27 21:10:50.107518518 +0000 UTC m=+4075.497771674" lastFinishedPulling="2026-01-27 21:10:50.551611143 +0000 UTC m=+4075.941864299" observedRunningTime="2026-01-27 21:10:51.030799305 +0000 UTC m=+4076.421052461" watchObservedRunningTime="2026-01-27 21:10:51.036063965 +0000 UTC m=+4076.426317111" Jan 27 21:10:59 crc kubenswrapper[4793]: I0127 21:10:59.803885 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55" Jan 27 21:10:59 crc kubenswrapper[4793]: E0127 21:10:59.804671 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" 
Jan 27 21:11:05 crc kubenswrapper[4793]: I0127 21:11:05.811033 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245"
Jan 27 21:11:05 crc kubenswrapper[4793]: E0127 21:11:05.812172 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:11:14 crc kubenswrapper[4793]: I0127 21:11:14.804146 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:11:14 crc kubenswrapper[4793]: E0127 21:11:14.805254 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:11:16 crc kubenswrapper[4793]: I0127 21:11:16.803718 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245"
Jan 27 21:11:16 crc kubenswrapper[4793]: E0127 21:11:16.804366 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:11:25 crc kubenswrapper[4793]: I0127 21:11:25.805359 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:11:25 crc kubenswrapper[4793]: E0127 21:11:25.806394 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:11:28 crc kubenswrapper[4793]: I0127 21:11:28.802916 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245"
Jan 27 21:11:28 crc kubenswrapper[4793]: E0127 21:11:28.803777 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:11:36 crc kubenswrapper[4793]: I0127 21:11:36.803362 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:11:36 crc kubenswrapper[4793]: E0127 21:11:36.804189 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:11:42 crc kubenswrapper[4793]: I0127 21:11:42.804210 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245"
Jan 27 21:11:42 crc kubenswrapper[4793]: E0127 21:11:42.805476 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:11:49 crc kubenswrapper[4793]: I0127 21:11:49.802949 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:11:49 crc kubenswrapper[4793]: E0127 21:11:49.803659 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:11:57 crc kubenswrapper[4793]: I0127 21:11:57.807267 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245"
Jan 27 21:11:58 crc kubenswrapper[4793]: I0127 21:11:58.701286 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"182fdabdfe5ed76bc8cf20051a045117330b8f24b6a69106baed4d343976b76e"}
Jan 27 21:12:01 crc kubenswrapper[4793]: I0127 21:12:01.803990 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:12:01 crc kubenswrapper[4793]: E0127 21:12:01.805246 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:12:13 crc kubenswrapper[4793]: I0127 21:12:13.804675 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:12:13 crc kubenswrapper[4793]: E0127 21:12:13.805838 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:12:26 crc kubenswrapper[4793]: I0127 21:12:26.811152 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:12:26 crc kubenswrapper[4793]: E0127 21:12:26.811729 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:12:37 crc kubenswrapper[4793]: I0127 21:12:37.803323 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:12:37 crc kubenswrapper[4793]: E0127 21:12:37.804159 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:12:48 crc kubenswrapper[4793]: I0127 21:12:48.803395 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:12:48 crc kubenswrapper[4793]: E0127 21:12:48.804030 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:13:00 crc kubenswrapper[4793]: I0127 21:13:00.803616 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:13:00 crc kubenswrapper[4793]: E0127 21:13:00.804580 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:13:13 crc kubenswrapper[4793]: I0127 21:13:13.803715 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:13:13 crc kubenswrapper[4793]: E0127 21:13:13.804784 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:13:23 crc kubenswrapper[4793]: I0127 21:13:23.055389 4793 generic.go:334] "Generic (PLEG): container finished" podID="9d45706e-d075-45fc-9d80-b21908572463" containerID="20dccf2399e99f1a4d460449ae667bc81c20be782da84ab54020c51b98374241" exitCode=0
Jan 27 21:13:23 crc kubenswrapper[4793]: I0127 21:13:23.055463 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" event={"ID":"9d45706e-d075-45fc-9d80-b21908572463","Type":"ContainerDied","Data":"20dccf2399e99f1a4d460449ae667bc81c20be782da84ab54020c51b98374241"}
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.529028 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs"
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.703827 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hpvw4\" (UniqueName: \"kubernetes.io/projected/9d45706e-d075-45fc-9d80-b21908572463-kube-api-access-hpvw4\") pod \"9d45706e-d075-45fc-9d80-b21908572463\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") "
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.704301 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ssh-key-openstack-edpm-ipam\") pod \"9d45706e-d075-45fc-9d80-b21908572463\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") "
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.704385 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-2\") pod \"9d45706e-d075-45fc-9d80-b21908572463\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") "
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.704411 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-0\") pod \"9d45706e-d075-45fc-9d80-b21908572463\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") "
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.704449 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-inventory\") pod \"9d45706e-d075-45fc-9d80-b21908572463\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") "
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.704632 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-telemetry-combined-ca-bundle\") pod \"9d45706e-d075-45fc-9d80-b21908572463\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") "
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.704724 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-1\") pod \"9d45706e-d075-45fc-9d80-b21908572463\" (UID: \"9d45706e-d075-45fc-9d80-b21908572463\") "
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.711588 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d45706e-d075-45fc-9d80-b21908572463-kube-api-access-hpvw4" (OuterVolumeSpecName: "kube-api-access-hpvw4") pod "9d45706e-d075-45fc-9d80-b21908572463" (UID: "9d45706e-d075-45fc-9d80-b21908572463"). InnerVolumeSpecName "kube-api-access-hpvw4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.726744 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "9d45706e-d075-45fc-9d80-b21908572463" (UID: "9d45706e-d075-45fc-9d80-b21908572463"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.743785 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "9d45706e-d075-45fc-9d80-b21908572463" (UID: "9d45706e-d075-45fc-9d80-b21908572463"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.745714 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "9d45706e-d075-45fc-9d80-b21908572463" (UID: "9d45706e-d075-45fc-9d80-b21908572463"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.749376 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-inventory" (OuterVolumeSpecName: "inventory") pod "9d45706e-d075-45fc-9d80-b21908572463" (UID: "9d45706e-d075-45fc-9d80-b21908572463"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.762268 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "9d45706e-d075-45fc-9d80-b21908572463" (UID: "9d45706e-d075-45fc-9d80-b21908572463"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.770881 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "9d45706e-d075-45fc-9d80-b21908572463" (UID: "9d45706e-d075-45fc-9d80-b21908572463"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.810205 4793 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\""
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.810263 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hpvw4\" (UniqueName: \"kubernetes.io/projected/9d45706e-d075-45fc-9d80-b21908572463-kube-api-access-hpvw4\") on node \"crc\" DevicePath \"\""
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.810283 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.810298 4793 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\""
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.810314 4793 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\""
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.810333 4793 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-inventory\") on node \"crc\" DevicePath \"\""
Jan 27 21:13:24 crc kubenswrapper[4793]: I0127 21:13:24.810353 4793 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d45706e-d075-45fc-9d80-b21908572463-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 21:13:25 crc kubenswrapper[4793]: I0127 21:13:25.075959 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs" event={"ID":"9d45706e-d075-45fc-9d80-b21908572463","Type":"ContainerDied","Data":"200dc554deeb9366f37e96e3946ae71c664259a14359cc53b7439c1652b35ffd"}
Jan 27 21:13:25 crc kubenswrapper[4793]: I0127 21:13:25.076003 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs"
Jan 27 21:13:25 crc kubenswrapper[4793]: I0127 21:13:25.076011 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="200dc554deeb9366f37e96e3946ae71c664259a14359cc53b7439c1652b35ffd"
Jan 27 21:13:28 crc kubenswrapper[4793]: I0127 21:13:28.803074 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:13:28 crc kubenswrapper[4793]: E0127 21:13:28.804045 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:13:39 crc kubenswrapper[4793]: I0127 21:13:39.804213 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:13:39 crc kubenswrapper[4793]: E0127 21:13:39.805329 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:13:39 crc kubenswrapper[4793]: I0127 21:13:39.853982 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wvx4k"]
Jan 27 21:13:39 crc kubenswrapper[4793]: E0127 21:13:39.854933 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d45706e-d075-45fc-9d80-b21908572463" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Jan 27 21:13:39 crc kubenswrapper[4793]: I0127 21:13:39.854970 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d45706e-d075-45fc-9d80-b21908572463" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Jan 27 21:13:39 crc kubenswrapper[4793]: I0127 21:13:39.855330 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d45706e-d075-45fc-9d80-b21908572463" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Jan 27 21:13:39 crc kubenswrapper[4793]: I0127 21:13:39.857785 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wvx4k"
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wvx4k" Jan 27 21:13:39 crc kubenswrapper[4793]: I0127 21:13:39.886182 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wvx4k"] Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.010681 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdsc4\" (UniqueName: \"kubernetes.io/projected/bd4d8da1-88ab-427f-bccd-6187c4792725-kube-api-access-xdsc4\") pod \"redhat-marketplace-wvx4k\" (UID: \"bd4d8da1-88ab-427f-bccd-6187c4792725\") " pod="openshift-marketplace/redhat-marketplace-wvx4k" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.011072 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd4d8da1-88ab-427f-bccd-6187c4792725-catalog-content\") pod \"redhat-marketplace-wvx4k\" (UID: \"bd4d8da1-88ab-427f-bccd-6187c4792725\") " pod="openshift-marketplace/redhat-marketplace-wvx4k" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.011331 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd4d8da1-88ab-427f-bccd-6187c4792725-utilities\") pod \"redhat-marketplace-wvx4k\" (UID: \"bd4d8da1-88ab-427f-bccd-6187c4792725\") " pod="openshift-marketplace/redhat-marketplace-wvx4k" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.033833 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jph8r"] Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.036564 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jph8r" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.044893 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jph8r"] Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.114467 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd4d8da1-88ab-427f-bccd-6187c4792725-utilities\") pod \"redhat-marketplace-wvx4k\" (UID: \"bd4d8da1-88ab-427f-bccd-6187c4792725\") " pod="openshift-marketplace/redhat-marketplace-wvx4k" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.114580 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdsc4\" (UniqueName: \"kubernetes.io/projected/bd4d8da1-88ab-427f-bccd-6187c4792725-kube-api-access-xdsc4\") pod \"redhat-marketplace-wvx4k\" (UID: \"bd4d8da1-88ab-427f-bccd-6187c4792725\") " pod="openshift-marketplace/redhat-marketplace-wvx4k" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.115318 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd4d8da1-88ab-427f-bccd-6187c4792725-utilities\") pod \"redhat-marketplace-wvx4k\" (UID: \"bd4d8da1-88ab-427f-bccd-6187c4792725\") " pod="openshift-marketplace/redhat-marketplace-wvx4k" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.115449 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd4d8da1-88ab-427f-bccd-6187c4792725-catalog-content\") pod \"redhat-marketplace-wvx4k\" (UID: \"bd4d8da1-88ab-427f-bccd-6187c4792725\") " 
pod="openshift-marketplace/redhat-marketplace-wvx4k" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.189732 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd4d8da1-88ab-427f-bccd-6187c4792725-catalog-content\") pod \"redhat-marketplace-wvx4k\" (UID: \"bd4d8da1-88ab-427f-bccd-6187c4792725\") " pod="openshift-marketplace/redhat-marketplace-wvx4k" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.217340 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xn8fk\" (UniqueName: \"kubernetes.io/projected/1780b173-d7b6-4fd8-80ea-926b379c3b73-kube-api-access-xn8fk\") pod \"community-operators-jph8r\" (UID: \"1780b173-d7b6-4fd8-80ea-926b379c3b73\") " pod="openshift-marketplace/community-operators-jph8r" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.217403 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1780b173-d7b6-4fd8-80ea-926b379c3b73-catalog-content\") pod \"community-operators-jph8r\" (UID: \"1780b173-d7b6-4fd8-80ea-926b379c3b73\") " pod="openshift-marketplace/community-operators-jph8r" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.217588 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1780b173-d7b6-4fd8-80ea-926b379c3b73-utilities\") pod \"community-operators-jph8r\" (UID: \"1780b173-d7b6-4fd8-80ea-926b379c3b73\") " pod="openshift-marketplace/community-operators-jph8r" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.318819 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1780b173-d7b6-4fd8-80ea-926b379c3b73-utilities\") pod \"community-operators-jph8r\" (UID: \"1780b173-d7b6-4fd8-80ea-926b379c3b73\") " pod="openshift-marketplace/community-operators-jph8r" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.318887 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xn8fk\" (UniqueName: \"kubernetes.io/projected/1780b173-d7b6-4fd8-80ea-926b379c3b73-kube-api-access-xn8fk\") pod \"community-operators-jph8r\" (UID: \"1780b173-d7b6-4fd8-80ea-926b379c3b73\") " pod="openshift-marketplace/community-operators-jph8r" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.318918 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1780b173-d7b6-4fd8-80ea-926b379c3b73-catalog-content\") pod \"community-operators-jph8r\" (UID: \"1780b173-d7b6-4fd8-80ea-926b379c3b73\") " pod="openshift-marketplace/community-operators-jph8r" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.319478 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1780b173-d7b6-4fd8-80ea-926b379c3b73-catalog-content\") pod \"community-operators-jph8r\" (UID: \"1780b173-d7b6-4fd8-80ea-926b379c3b73\") " pod="openshift-marketplace/community-operators-jph8r" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.319484 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1780b173-d7b6-4fd8-80ea-926b379c3b73-utilities\") pod \"community-operators-jph8r\" (UID: 
\"1780b173-d7b6-4fd8-80ea-926b379c3b73\") " pod="openshift-marketplace/community-operators-jph8r" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.329397 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdsc4\" (UniqueName: \"kubernetes.io/projected/bd4d8da1-88ab-427f-bccd-6187c4792725-kube-api-access-xdsc4\") pod \"redhat-marketplace-wvx4k\" (UID: \"bd4d8da1-88ab-427f-bccd-6187c4792725\") " pod="openshift-marketplace/redhat-marketplace-wvx4k" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.336212 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xn8fk\" (UniqueName: \"kubernetes.io/projected/1780b173-d7b6-4fd8-80ea-926b379c3b73-kube-api-access-xn8fk\") pod \"community-operators-jph8r\" (UID: \"1780b173-d7b6-4fd8-80ea-926b379c3b73\") " pod="openshift-marketplace/community-operators-jph8r" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.359851 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jph8r" Jan 27 21:13:40 crc kubenswrapper[4793]: I0127 21:13:40.523961 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wvx4k" Jan 27 21:13:41 crc kubenswrapper[4793]: I0127 21:13:41.013311 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jph8r"] Jan 27 21:13:41 crc kubenswrapper[4793]: I0127 21:13:41.177763 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wvx4k"] Jan 27 21:13:41 crc kubenswrapper[4793]: I0127 21:13:41.367958 4793 generic.go:334] "Generic (PLEG): container finished" podID="1780b173-d7b6-4fd8-80ea-926b379c3b73" containerID="416c87d4163c1e8cbed066915c9e931000329d7dfd2583b6519f4d670fff5754" exitCode=0 Jan 27 21:13:41 crc kubenswrapper[4793]: I0127 21:13:41.368078 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jph8r" event={"ID":"1780b173-d7b6-4fd8-80ea-926b379c3b73","Type":"ContainerDied","Data":"416c87d4163c1e8cbed066915c9e931000329d7dfd2583b6519f4d670fff5754"} Jan 27 21:13:41 crc kubenswrapper[4793]: I0127 21:13:41.368390 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jph8r" event={"ID":"1780b173-d7b6-4fd8-80ea-926b379c3b73","Type":"ContainerStarted","Data":"f97cf3d0893e27b29cb59cd9914c550aa4bfad0b4a8491ea6c730671b7b0c19a"} Jan 27 21:13:41 crc kubenswrapper[4793]: I0127 21:13:41.370093 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 21:13:41 crc kubenswrapper[4793]: I0127 21:13:41.370615 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wvx4k" event={"ID":"bd4d8da1-88ab-427f-bccd-6187c4792725","Type":"ContainerStarted","Data":"ca57516e004614c79962fc4aa42ecf1e1d5d4810f185e066968330ab5e658a71"} Jan 27 21:13:42 crc kubenswrapper[4793]: I0127 21:13:42.381975 4793 generic.go:334] "Generic (PLEG): container finished" podID="bd4d8da1-88ab-427f-bccd-6187c4792725" containerID="8e21f94bf793abc5bb56310c44d1a79c709061cc4bd3f72820bc41af26e34fda" exitCode=0 Jan 27 21:13:42 crc kubenswrapper[4793]: I0127 21:13:42.382316 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wvx4k" 
event={"ID":"bd4d8da1-88ab-427f-bccd-6187c4792725","Type":"ContainerDied","Data":"8e21f94bf793abc5bb56310c44d1a79c709061cc4bd3f72820bc41af26e34fda"} Jan 27 21:13:42 crc kubenswrapper[4793]: I0127 21:13:42.454626 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wbrzc"] Jan 27 21:13:42 crc kubenswrapper[4793]: I0127 21:13:42.457468 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wbrzc" Jan 27 21:13:42 crc kubenswrapper[4793]: I0127 21:13:42.461166 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wbrzc"] Jan 27 21:13:42 crc kubenswrapper[4793]: I0127 21:13:42.471309 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stpcc\" (UniqueName: \"kubernetes.io/projected/b96c45ee-df43-464e-98e6-021beeb75939-kube-api-access-stpcc\") pod \"redhat-operators-wbrzc\" (UID: \"b96c45ee-df43-464e-98e6-021beeb75939\") " pod="openshift-marketplace/redhat-operators-wbrzc" Jan 27 21:13:42 crc kubenswrapper[4793]: I0127 21:13:42.471485 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b96c45ee-df43-464e-98e6-021beeb75939-utilities\") pod \"redhat-operators-wbrzc\" (UID: \"b96c45ee-df43-464e-98e6-021beeb75939\") " pod="openshift-marketplace/redhat-operators-wbrzc" Jan 27 21:13:42 crc kubenswrapper[4793]: I0127 21:13:42.471510 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b96c45ee-df43-464e-98e6-021beeb75939-catalog-content\") pod \"redhat-operators-wbrzc\" (UID: \"b96c45ee-df43-464e-98e6-021beeb75939\") " pod="openshift-marketplace/redhat-operators-wbrzc" Jan 27 21:13:42 crc kubenswrapper[4793]: I0127 21:13:42.573611 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b96c45ee-df43-464e-98e6-021beeb75939-utilities\") pod \"redhat-operators-wbrzc\" (UID: \"b96c45ee-df43-464e-98e6-021beeb75939\") " pod="openshift-marketplace/redhat-operators-wbrzc" Jan 27 21:13:42 crc kubenswrapper[4793]: I0127 21:13:42.573862 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b96c45ee-df43-464e-98e6-021beeb75939-catalog-content\") pod \"redhat-operators-wbrzc\" (UID: \"b96c45ee-df43-464e-98e6-021beeb75939\") " pod="openshift-marketplace/redhat-operators-wbrzc" Jan 27 21:13:42 crc kubenswrapper[4793]: I0127 21:13:42.574179 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stpcc\" (UniqueName: \"kubernetes.io/projected/b96c45ee-df43-464e-98e6-021beeb75939-kube-api-access-stpcc\") pod \"redhat-operators-wbrzc\" (UID: \"b96c45ee-df43-464e-98e6-021beeb75939\") " pod="openshift-marketplace/redhat-operators-wbrzc" Jan 27 21:13:42 crc kubenswrapper[4793]: I0127 21:13:42.574197 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b96c45ee-df43-464e-98e6-021beeb75939-utilities\") pod \"redhat-operators-wbrzc\" (UID: \"b96c45ee-df43-464e-98e6-021beeb75939\") " pod="openshift-marketplace/redhat-operators-wbrzc" Jan 27 21:13:42 crc kubenswrapper[4793]: I0127 21:13:42.574220 4793 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b96c45ee-df43-464e-98e6-021beeb75939-catalog-content\") pod \"redhat-operators-wbrzc\" (UID: \"b96c45ee-df43-464e-98e6-021beeb75939\") " pod="openshift-marketplace/redhat-operators-wbrzc" Jan 27 21:13:42 crc kubenswrapper[4793]: I0127 21:13:42.592151 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stpcc\" (UniqueName: \"kubernetes.io/projected/b96c45ee-df43-464e-98e6-021beeb75939-kube-api-access-stpcc\") pod \"redhat-operators-wbrzc\" (UID: \"b96c45ee-df43-464e-98e6-021beeb75939\") " pod="openshift-marketplace/redhat-operators-wbrzc" Jan 27 21:13:42 crc kubenswrapper[4793]: I0127 21:13:42.786795 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wbrzc" Jan 27 21:13:43 crc kubenswrapper[4793]: I0127 21:13:43.540171 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wbrzc"] Jan 27 21:13:44 crc kubenswrapper[4793]: I0127 21:13:44.138072 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xnz8n"] Jan 27 21:13:44 crc kubenswrapper[4793]: I0127 21:13:44.140383 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbrzc" event={"ID":"b96c45ee-df43-464e-98e6-021beeb75939","Type":"ContainerStarted","Data":"7de039506bae5f19b2ef6f8f675c7dd515a23154e3fd1f1cbf0c600fd905428d"} Jan 27 21:13:44 crc kubenswrapper[4793]: I0127 21:13:44.140448 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jph8r" event={"ID":"1780b173-d7b6-4fd8-80ea-926b379c3b73","Type":"ContainerStarted","Data":"498e3c7c25d770de2e8f6c8c8a4f9919ceeb5df08946e249bf8ab82d41fab1ce"} Jan 27 21:13:44 crc kubenswrapper[4793]: I0127 21:13:44.140464 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xnz8n"] Jan 27 21:13:44 crc kubenswrapper[4793]: I0127 21:13:44.140607 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xnz8n" Jan 27 21:13:44 crc kubenswrapper[4793]: I0127 21:13:44.193858 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/133f9908-e135-4dd8-8213-37fde5ca3e2c-utilities\") pod \"certified-operators-xnz8n\" (UID: \"133f9908-e135-4dd8-8213-37fde5ca3e2c\") " pod="openshift-marketplace/certified-operators-xnz8n" Jan 27 21:13:44 crc kubenswrapper[4793]: I0127 21:13:44.193932 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8l5pr\" (UniqueName: \"kubernetes.io/projected/133f9908-e135-4dd8-8213-37fde5ca3e2c-kube-api-access-8l5pr\") pod \"certified-operators-xnz8n\" (UID: \"133f9908-e135-4dd8-8213-37fde5ca3e2c\") " pod="openshift-marketplace/certified-operators-xnz8n" Jan 27 21:13:44 crc kubenswrapper[4793]: I0127 21:13:44.194130 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/133f9908-e135-4dd8-8213-37fde5ca3e2c-catalog-content\") pod \"certified-operators-xnz8n\" (UID: \"133f9908-e135-4dd8-8213-37fde5ca3e2c\") " pod="openshift-marketplace/certified-operators-xnz8n" Jan 27 21:13:44 crc kubenswrapper[4793]: I0127 21:13:44.295716 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/133f9908-e135-4dd8-8213-37fde5ca3e2c-catalog-content\") pod \"certified-operators-xnz8n\" (UID: \"133f9908-e135-4dd8-8213-37fde5ca3e2c\") " pod="openshift-marketplace/certified-operators-xnz8n" Jan 27 21:13:44 crc kubenswrapper[4793]: I0127 21:13:44.296006 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/133f9908-e135-4dd8-8213-37fde5ca3e2c-utilities\") pod \"certified-operators-xnz8n\" (UID: \"133f9908-e135-4dd8-8213-37fde5ca3e2c\") " pod="openshift-marketplace/certified-operators-xnz8n" Jan 27 21:13:44 crc kubenswrapper[4793]: I0127 21:13:44.296058 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8l5pr\" (UniqueName: \"kubernetes.io/projected/133f9908-e135-4dd8-8213-37fde5ca3e2c-kube-api-access-8l5pr\") pod \"certified-operators-xnz8n\" (UID: \"133f9908-e135-4dd8-8213-37fde5ca3e2c\") " pod="openshift-marketplace/certified-operators-xnz8n" Jan 27 21:13:44 crc kubenswrapper[4793]: I0127 21:13:44.296412 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/133f9908-e135-4dd8-8213-37fde5ca3e2c-catalog-content\") pod \"certified-operators-xnz8n\" (UID: \"133f9908-e135-4dd8-8213-37fde5ca3e2c\") " pod="openshift-marketplace/certified-operators-xnz8n" Jan 27 21:13:44 crc kubenswrapper[4793]: I0127 21:13:44.296490 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/133f9908-e135-4dd8-8213-37fde5ca3e2c-utilities\") pod \"certified-operators-xnz8n\" (UID: \"133f9908-e135-4dd8-8213-37fde5ca3e2c\") " pod="openshift-marketplace/certified-operators-xnz8n" Jan 27 21:13:44 crc kubenswrapper[4793]: I0127 21:13:44.316787 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8l5pr\" (UniqueName: \"kubernetes.io/projected/133f9908-e135-4dd8-8213-37fde5ca3e2c-kube-api-access-8l5pr\") pod 
\"certified-operators-xnz8n\" (UID: \"133f9908-e135-4dd8-8213-37fde5ca3e2c\") " pod="openshift-marketplace/certified-operators-xnz8n" Jan 27 21:13:44 crc kubenswrapper[4793]: I0127 21:13:44.318611 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xnz8n" Jan 27 21:13:45 crc kubenswrapper[4793]: I0127 21:13:45.094364 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xnz8n"] Jan 27 21:13:45 crc kubenswrapper[4793]: I0127 21:13:45.117974 4793 generic.go:334] "Generic (PLEG): container finished" podID="b96c45ee-df43-464e-98e6-021beeb75939" containerID="f66e9a3b1bb273cd57052c298cda79e0988543aa955f917545c768949757795c" exitCode=0 Jan 27 21:13:45 crc kubenswrapper[4793]: I0127 21:13:45.118089 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbrzc" event={"ID":"b96c45ee-df43-464e-98e6-021beeb75939","Type":"ContainerDied","Data":"f66e9a3b1bb273cd57052c298cda79e0988543aa955f917545c768949757795c"} Jan 27 21:13:45 crc kubenswrapper[4793]: I0127 21:13:45.121372 4793 generic.go:334] "Generic (PLEG): container finished" podID="1780b173-d7b6-4fd8-80ea-926b379c3b73" containerID="498e3c7c25d770de2e8f6c8c8a4f9919ceeb5df08946e249bf8ab82d41fab1ce" exitCode=0 Jan 27 21:13:45 crc kubenswrapper[4793]: I0127 21:13:45.121506 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jph8r" event={"ID":"1780b173-d7b6-4fd8-80ea-926b379c3b73","Type":"ContainerDied","Data":"498e3c7c25d770de2e8f6c8c8a4f9919ceeb5df08946e249bf8ab82d41fab1ce"} Jan 27 21:13:45 crc kubenswrapper[4793]: I0127 21:13:45.127329 4793 generic.go:334] "Generic (PLEG): container finished" podID="bd4d8da1-88ab-427f-bccd-6187c4792725" containerID="63fa0e40e9367a5d84433f26a401966b9355160e3788cdeb4287547db33d4e6c" exitCode=0 Jan 27 21:13:45 crc kubenswrapper[4793]: I0127 21:13:45.127382 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wvx4k" event={"ID":"bd4d8da1-88ab-427f-bccd-6187c4792725","Type":"ContainerDied","Data":"63fa0e40e9367a5d84433f26a401966b9355160e3788cdeb4287547db33d4e6c"} Jan 27 21:13:46 crc kubenswrapper[4793]: I0127 21:13:46.137420 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbrzc" event={"ID":"b96c45ee-df43-464e-98e6-021beeb75939","Type":"ContainerStarted","Data":"e12c8334a9dddb331419061b67366239549f897d5f1c2b42deda7ba3d9dd5462"} Jan 27 21:13:46 crc kubenswrapper[4793]: I0127 21:13:46.143508 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jph8r" event={"ID":"1780b173-d7b6-4fd8-80ea-926b379c3b73","Type":"ContainerStarted","Data":"6df50521ca37d5635c039e4f3e305c6db62e081a7c478eefabeaf3deed0625e7"} Jan 27 21:13:46 crc kubenswrapper[4793]: I0127 21:13:46.146774 4793 generic.go:334] "Generic (PLEG): container finished" podID="133f9908-e135-4dd8-8213-37fde5ca3e2c" containerID="5d97b3762ffdca885b29919de77f82d6ad400b70d02be817bfce30709e5fcb95" exitCode=0 Jan 27 21:13:46 crc kubenswrapper[4793]: I0127 21:13:46.146975 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xnz8n" event={"ID":"133f9908-e135-4dd8-8213-37fde5ca3e2c","Type":"ContainerDied","Data":"5d97b3762ffdca885b29919de77f82d6ad400b70d02be817bfce30709e5fcb95"} Jan 27 21:13:46 crc kubenswrapper[4793]: I0127 21:13:46.147089 4793 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/certified-operators-xnz8n" event={"ID":"133f9908-e135-4dd8-8213-37fde5ca3e2c","Type":"ContainerStarted","Data":"03fd0c3007ba12f2931b1d1a348607682b6c6a8b855168509e95f6c6807ffbd4"} Jan 27 21:13:46 crc kubenswrapper[4793]: I0127 21:13:46.151756 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wvx4k" event={"ID":"bd4d8da1-88ab-427f-bccd-6187c4792725","Type":"ContainerStarted","Data":"5500a03d244dc0d21a1d4a8d29deca1a8aaa139ea2d9ed238a02a86d3adba2ab"} Jan 27 21:13:46 crc kubenswrapper[4793]: I0127 21:13:46.441025 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jph8r" podStartSLOduration=2.254758356 podStartE2EDuration="6.441005202s" podCreationTimestamp="2026-01-27 21:13:40 +0000 UTC" firstStartedPulling="2026-01-27 21:13:41.369745681 +0000 UTC m=+4246.759998847" lastFinishedPulling="2026-01-27 21:13:45.555992537 +0000 UTC m=+4250.946245693" observedRunningTime="2026-01-27 21:13:46.430474134 +0000 UTC m=+4251.820727290" watchObservedRunningTime="2026-01-27 21:13:46.441005202 +0000 UTC m=+4251.831258358" Jan 27 21:13:46 crc kubenswrapper[4793]: I0127 21:13:46.456295 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wvx4k" podStartSLOduration=4.295970062 podStartE2EDuration="7.456278878s" podCreationTimestamp="2026-01-27 21:13:39 +0000 UTC" firstStartedPulling="2026-01-27 21:13:42.384406804 +0000 UTC m=+4247.774660000" lastFinishedPulling="2026-01-27 21:13:45.54471566 +0000 UTC m=+4250.934968816" observedRunningTime="2026-01-27 21:13:46.45267825 +0000 UTC m=+4251.842931406" watchObservedRunningTime="2026-01-27 21:13:46.456278878 +0000 UTC m=+4251.846532034" Jan 27 21:13:48 crc kubenswrapper[4793]: I0127 21:13:48.175160 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xnz8n" event={"ID":"133f9908-e135-4dd8-8213-37fde5ca3e2c","Type":"ContainerStarted","Data":"4a0235cd5c4d95508bc6ca89e016e449ff5461b4413db929ebb825ec8910561a"} Jan 27 21:13:49 crc kubenswrapper[4793]: I0127 21:13:49.188168 4793 generic.go:334] "Generic (PLEG): container finished" podID="b96c45ee-df43-464e-98e6-021beeb75939" containerID="e12c8334a9dddb331419061b67366239549f897d5f1c2b42deda7ba3d9dd5462" exitCode=0 Jan 27 21:13:49 crc kubenswrapper[4793]: I0127 21:13:49.188277 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbrzc" event={"ID":"b96c45ee-df43-464e-98e6-021beeb75939","Type":"ContainerDied","Data":"e12c8334a9dddb331419061b67366239549f897d5f1c2b42deda7ba3d9dd5462"} Jan 27 21:13:49 crc kubenswrapper[4793]: I0127 21:13:49.190353 4793 generic.go:334] "Generic (PLEG): container finished" podID="133f9908-e135-4dd8-8213-37fde5ca3e2c" containerID="4a0235cd5c4d95508bc6ca89e016e449ff5461b4413db929ebb825ec8910561a" exitCode=0 Jan 27 21:13:49 crc kubenswrapper[4793]: I0127 21:13:49.190396 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xnz8n" event={"ID":"133f9908-e135-4dd8-8213-37fde5ca3e2c","Type":"ContainerDied","Data":"4a0235cd5c4d95508bc6ca89e016e449ff5461b4413db929ebb825ec8910561a"} Jan 27 21:13:50 crc kubenswrapper[4793]: I0127 21:13:50.202931 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xnz8n" 
event={"ID":"133f9908-e135-4dd8-8213-37fde5ca3e2c","Type":"ContainerStarted","Data":"070ce88bc60f90341fb2220c2d0f17e89c4df2068d8630b741fa7794b0330611"} Jan 27 21:13:50 crc kubenswrapper[4793]: I0127 21:13:50.206462 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbrzc" event={"ID":"b96c45ee-df43-464e-98e6-021beeb75939","Type":"ContainerStarted","Data":"9e10f15a08bcb7d4ab18775f90949bb56c1d3fd3b6dad5197b2d992609e7d656"} Jan 27 21:13:50 crc kubenswrapper[4793]: I0127 21:13:50.248174 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wbrzc" podStartSLOduration=3.782320872 podStartE2EDuration="8.248152539s" podCreationTimestamp="2026-01-27 21:13:42 +0000 UTC" firstStartedPulling="2026-01-27 21:13:45.119612721 +0000 UTC m=+4250.509865877" lastFinishedPulling="2026-01-27 21:13:49.585444388 +0000 UTC m=+4254.975697544" observedRunningTime="2026-01-27 21:13:50.245697088 +0000 UTC m=+4255.635950264" watchObservedRunningTime="2026-01-27 21:13:50.248152539 +0000 UTC m=+4255.638405695" Jan 27 21:13:50 crc kubenswrapper[4793]: I0127 21:13:50.254300 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xnz8n" podStartSLOduration=3.800836398 podStartE2EDuration="7.254266789s" podCreationTimestamp="2026-01-27 21:13:43 +0000 UTC" firstStartedPulling="2026-01-27 21:13:46.148603815 +0000 UTC m=+4251.538856971" lastFinishedPulling="2026-01-27 21:13:49.602034206 +0000 UTC m=+4254.992287362" observedRunningTime="2026-01-27 21:13:50.225224965 +0000 UTC m=+4255.615478121" watchObservedRunningTime="2026-01-27 21:13:50.254266789 +0000 UTC m=+4255.644519965" Jan 27 21:13:50 crc kubenswrapper[4793]: I0127 21:13:50.360916 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jph8r" Jan 27 21:13:50 crc kubenswrapper[4793]: I0127 21:13:50.361735 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jph8r" Jan 27 21:13:50 crc kubenswrapper[4793]: I0127 21:13:50.525501 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wvx4k" Jan 27 21:13:50 crc kubenswrapper[4793]: I0127 21:13:50.525580 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wvx4k" Jan 27 21:13:51 crc kubenswrapper[4793]: I0127 21:13:51.405655 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-jph8r" podUID="1780b173-d7b6-4fd8-80ea-926b379c3b73" containerName="registry-server" probeResult="failure" output=< Jan 27 21:13:51 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s Jan 27 21:13:51 crc kubenswrapper[4793]: > Jan 27 21:13:51 crc kubenswrapper[4793]: I0127 21:13:51.570880 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-wvx4k" podUID="bd4d8da1-88ab-427f-bccd-6187c4792725" containerName="registry-server" probeResult="failure" output=< Jan 27 21:13:51 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s Jan 27 21:13:51 crc kubenswrapper[4793]: > Jan 27 21:13:52 crc kubenswrapper[4793]: I0127 21:13:52.787746 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wbrzc" Jan 27 21:13:52 crc 
Jan 27 21:13:52 crc kubenswrapper[4793]: I0127 21:13:52.788056 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wbrzc"
Jan 27 21:13:52 crc kubenswrapper[4793]: I0127 21:13:52.804495 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55"
Jan 27 21:13:52 crc kubenswrapper[4793]: E0127 21:13:52.804810 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:13:53 crc kubenswrapper[4793]: I0127 21:13:53.838331 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wbrzc" podUID="b96c45ee-df43-464e-98e6-021beeb75939" containerName="registry-server" probeResult="failure" output=<
Jan 27 21:13:53 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s
Jan 27 21:13:53 crc kubenswrapper[4793]: >
Jan 27 21:13:54 crc kubenswrapper[4793]: I0127 21:13:54.318836 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xnz8n"
Jan 27 21:13:54 crc kubenswrapper[4793]: I0127 21:13:54.319250 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xnz8n"
Jan 27 21:13:54 crc kubenswrapper[4793]: I0127 21:13:54.371715 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xnz8n"
Jan 27 21:13:55 crc kubenswrapper[4793]: I0127 21:13:55.305989 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xnz8n"
Jan 27 21:13:57 crc kubenswrapper[4793]: I0127 21:13:57.828532 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xnz8n"]
Jan 27 21:13:57 crc kubenswrapper[4793]: I0127 21:13:57.829328 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xnz8n" podUID="133f9908-e135-4dd8-8213-37fde5ca3e2c" containerName="registry-server" containerID="cri-o://070ce88bc60f90341fb2220c2d0f17e89c4df2068d8630b741fa7794b0330611" gracePeriod=2
Jan 27 21:13:58 crc kubenswrapper[4793]: I0127 21:13:58.284849 4793 generic.go:334] "Generic (PLEG): container finished" podID="133f9908-e135-4dd8-8213-37fde5ca3e2c" containerID="070ce88bc60f90341fb2220c2d0f17e89c4df2068d8630b741fa7794b0330611" exitCode=0
Jan 27 21:13:58 crc kubenswrapper[4793]: I0127 21:13:58.285130 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xnz8n" event={"ID":"133f9908-e135-4dd8-8213-37fde5ca3e2c","Type":"ContainerDied","Data":"070ce88bc60f90341fb2220c2d0f17e89c4df2068d8630b741fa7794b0330611"}
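[The watcher-applier-0 error above is the kubelet declining to restart a crash-looping container until its back-off window expires. Kubelet's restart back-off starts at a 10s base and doubles per consecutive crash up to the 5m0s cap quoted in the message; the helper below is an illustrative sketch of that schedule, not kubelet's actual code.]

package backoffsketch

import "time"

// Sketch: kubelet-style crash-loop restart back-off. Each consecutive
// crash doubles the delay from a 10s base until the 5m cap seen in the
// "back-off 5m0s restarting failed container" message is reached.
func restartBackoff(crashes int) time.Duration {
	const (
		base     = 10 * time.Second
		maxDelay = 5 * time.Minute
	)
	d := base
	for i := 1; i < crashes; i++ {
		d *= 2
		if d >= maxDelay {
			return maxDelay
		}
	}
	return d
}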
Jan 27 21:13:58 crc kubenswrapper[4793]: I0127 21:13:58.590321 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xnz8n"
Jan 27 21:13:58 crc kubenswrapper[4793]: I0127 21:13:58.762195 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/133f9908-e135-4dd8-8213-37fde5ca3e2c-catalog-content\") pod \"133f9908-e135-4dd8-8213-37fde5ca3e2c\" (UID: \"133f9908-e135-4dd8-8213-37fde5ca3e2c\") "
Jan 27 21:13:58 crc kubenswrapper[4793]: I0127 21:13:58.762262 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/133f9908-e135-4dd8-8213-37fde5ca3e2c-utilities\") pod \"133f9908-e135-4dd8-8213-37fde5ca3e2c\" (UID: \"133f9908-e135-4dd8-8213-37fde5ca3e2c\") "
Jan 27 21:13:58 crc kubenswrapper[4793]: I0127 21:13:58.762432 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8l5pr\" (UniqueName: \"kubernetes.io/projected/133f9908-e135-4dd8-8213-37fde5ca3e2c-kube-api-access-8l5pr\") pod \"133f9908-e135-4dd8-8213-37fde5ca3e2c\" (UID: \"133f9908-e135-4dd8-8213-37fde5ca3e2c\") "
Jan 27 21:13:58 crc kubenswrapper[4793]: I0127 21:13:58.763821 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/133f9908-e135-4dd8-8213-37fde5ca3e2c-utilities" (OuterVolumeSpecName: "utilities") pod "133f9908-e135-4dd8-8213-37fde5ca3e2c" (UID: "133f9908-e135-4dd8-8213-37fde5ca3e2c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 21:13:58 crc kubenswrapper[4793]: I0127 21:13:58.772185 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/133f9908-e135-4dd8-8213-37fde5ca3e2c-kube-api-access-8l5pr" (OuterVolumeSpecName: "kube-api-access-8l5pr") pod "133f9908-e135-4dd8-8213-37fde5ca3e2c" (UID: "133f9908-e135-4dd8-8213-37fde5ca3e2c"). InnerVolumeSpecName "kube-api-access-8l5pr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 21:13:58 crc kubenswrapper[4793]: I0127 21:13:58.812408 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/133f9908-e135-4dd8-8213-37fde5ca3e2c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "133f9908-e135-4dd8-8213-37fde5ca3e2c" (UID: "133f9908-e135-4dd8-8213-37fde5ca3e2c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 21:13:58 crc kubenswrapper[4793]: I0127 21:13:58.864731 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8l5pr\" (UniqueName: \"kubernetes.io/projected/133f9908-e135-4dd8-8213-37fde5ca3e2c-kube-api-access-8l5pr\") on node \"crc\" DevicePath \"\""
Jan 27 21:13:58 crc kubenswrapper[4793]: I0127 21:13:58.864767 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/133f9908-e135-4dd8-8213-37fde5ca3e2c-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 21:13:58 crc kubenswrapper[4793]: I0127 21:13:58.864820 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/133f9908-e135-4dd8-8213-37fde5ca3e2c-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 21:13:59 crc kubenswrapper[4793]: I0127 21:13:59.294954 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xnz8n" event={"ID":"133f9908-e135-4dd8-8213-37fde5ca3e2c","Type":"ContainerDied","Data":"03fd0c3007ba12f2931b1d1a348607682b6c6a8b855168509e95f6c6807ffbd4"}
Jan 27 21:13:59 crc kubenswrapper[4793]: I0127 21:13:59.294985 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xnz8n"
Jan 27 21:13:59 crc kubenswrapper[4793]: I0127 21:13:59.295017 4793 scope.go:117] "RemoveContainer" containerID="070ce88bc60f90341fb2220c2d0f17e89c4df2068d8630b741fa7794b0330611"
Jan 27 21:13:59 crc kubenswrapper[4793]: I0127 21:13:59.322497 4793 scope.go:117] "RemoveContainer" containerID="4a0235cd5c4d95508bc6ca89e016e449ff5461b4413db929ebb825ec8910561a"
Jan 27 21:13:59 crc kubenswrapper[4793]: I0127 21:13:59.338575 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xnz8n"]
Jan 27 21:13:59 crc kubenswrapper[4793]: I0127 21:13:59.348711 4793 scope.go:117] "RemoveContainer" containerID="5d97b3762ffdca885b29919de77f82d6ad400b70d02be817bfce30709e5fcb95"
Jan 27 21:13:59 crc kubenswrapper[4793]: I0127 21:13:59.351090 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xnz8n"]
Jan 27 21:13:59 crc kubenswrapper[4793]: I0127 21:13:59.827931 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="133f9908-e135-4dd8-8213-37fde5ca3e2c" path="/var/lib/kubelet/pods/133f9908-e135-4dd8-8213-37fde5ca3e2c/volumes"
Jan 27 21:14:00 crc kubenswrapper[4793]: I0127 21:14:00.428912 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jph8r"
Jan 27 21:14:00 crc kubenswrapper[4793]: I0127 21:14:00.482127 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jph8r"
Jan 27 21:14:00 crc kubenswrapper[4793]: I0127 21:14:00.576910 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wvx4k"
Jan 27 21:14:00 crc kubenswrapper[4793]: I0127 21:14:00.640712 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wvx4k"
Jan 27 21:14:02 crc kubenswrapper[4793]: I0127 21:14:02.818088 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jph8r"]
"Killing container with a grace period" pod="openshift-marketplace/community-operators-jph8r" podUID="1780b173-d7b6-4fd8-80ea-926b379c3b73" containerName="registry-server" containerID="cri-o://6df50521ca37d5635c039e4f3e305c6db62e081a7c478eefabeaf3deed0625e7" gracePeriod=2 Jan 27 21:14:02 crc kubenswrapper[4793]: I0127 21:14:02.835542 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wbrzc" Jan 27 21:14:02 crc kubenswrapper[4793]: I0127 21:14:02.904860 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wbrzc" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.248730 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jph8r" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.347579 4793 generic.go:334] "Generic (PLEG): container finished" podID="1780b173-d7b6-4fd8-80ea-926b379c3b73" containerID="6df50521ca37d5635c039e4f3e305c6db62e081a7c478eefabeaf3deed0625e7" exitCode=0 Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.349148 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jph8r" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.350705 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jph8r" event={"ID":"1780b173-d7b6-4fd8-80ea-926b379c3b73","Type":"ContainerDied","Data":"6df50521ca37d5635c039e4f3e305c6db62e081a7c478eefabeaf3deed0625e7"} Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.350763 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jph8r" event={"ID":"1780b173-d7b6-4fd8-80ea-926b379c3b73","Type":"ContainerDied","Data":"f97cf3d0893e27b29cb59cd9914c550aa4bfad0b4a8491ea6c730671b7b0c19a"} Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.350797 4793 scope.go:117] "RemoveContainer" containerID="6df50521ca37d5635c039e4f3e305c6db62e081a7c478eefabeaf3deed0625e7" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.357752 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xn8fk\" (UniqueName: \"kubernetes.io/projected/1780b173-d7b6-4fd8-80ea-926b379c3b73-kube-api-access-xn8fk\") pod \"1780b173-d7b6-4fd8-80ea-926b379c3b73\" (UID: \"1780b173-d7b6-4fd8-80ea-926b379c3b73\") " Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.358033 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1780b173-d7b6-4fd8-80ea-926b379c3b73-catalog-content\") pod \"1780b173-d7b6-4fd8-80ea-926b379c3b73\" (UID: \"1780b173-d7b6-4fd8-80ea-926b379c3b73\") " Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.359689 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1780b173-d7b6-4fd8-80ea-926b379c3b73-utilities" (OuterVolumeSpecName: "utilities") pod "1780b173-d7b6-4fd8-80ea-926b379c3b73" (UID: "1780b173-d7b6-4fd8-80ea-926b379c3b73"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.359793 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1780b173-d7b6-4fd8-80ea-926b379c3b73-utilities\") pod \"1780b173-d7b6-4fd8-80ea-926b379c3b73\" (UID: \"1780b173-d7b6-4fd8-80ea-926b379c3b73\") " Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.360806 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1780b173-d7b6-4fd8-80ea-926b379c3b73-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.364730 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1780b173-d7b6-4fd8-80ea-926b379c3b73-kube-api-access-xn8fk" (OuterVolumeSpecName: "kube-api-access-xn8fk") pod "1780b173-d7b6-4fd8-80ea-926b379c3b73" (UID: "1780b173-d7b6-4fd8-80ea-926b379c3b73"). InnerVolumeSpecName "kube-api-access-xn8fk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.399606 4793 scope.go:117] "RemoveContainer" containerID="498e3c7c25d770de2e8f6c8c8a4f9919ceeb5df08946e249bf8ab82d41fab1ce" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.436584 4793 scope.go:117] "RemoveContainer" containerID="416c87d4163c1e8cbed066915c9e931000329d7dfd2583b6519f4d670fff5754" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.448159 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1780b173-d7b6-4fd8-80ea-926b379c3b73-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1780b173-d7b6-4fd8-80ea-926b379c3b73" (UID: "1780b173-d7b6-4fd8-80ea-926b379c3b73"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.463712 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xn8fk\" (UniqueName: \"kubernetes.io/projected/1780b173-d7b6-4fd8-80ea-926b379c3b73-kube-api-access-xn8fk\") on node \"crc\" DevicePath \"\"" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.463744 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1780b173-d7b6-4fd8-80ea-926b379c3b73-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.501970 4793 scope.go:117] "RemoveContainer" containerID="6df50521ca37d5635c039e4f3e305c6db62e081a7c478eefabeaf3deed0625e7" Jan 27 21:14:03 crc kubenswrapper[4793]: E0127 21:14:03.502481 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6df50521ca37d5635c039e4f3e305c6db62e081a7c478eefabeaf3deed0625e7\": container with ID starting with 6df50521ca37d5635c039e4f3e305c6db62e081a7c478eefabeaf3deed0625e7 not found: ID does not exist" containerID="6df50521ca37d5635c039e4f3e305c6db62e081a7c478eefabeaf3deed0625e7" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.502572 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6df50521ca37d5635c039e4f3e305c6db62e081a7c478eefabeaf3deed0625e7"} err="failed to get container status \"6df50521ca37d5635c039e4f3e305c6db62e081a7c478eefabeaf3deed0625e7\": rpc error: code = NotFound desc = could not find container \"6df50521ca37d5635c039e4f3e305c6db62e081a7c478eefabeaf3deed0625e7\": container with ID starting with 6df50521ca37d5635c039e4f3e305c6db62e081a7c478eefabeaf3deed0625e7 not found: ID does not exist" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.502603 4793 scope.go:117] "RemoveContainer" containerID="498e3c7c25d770de2e8f6c8c8a4f9919ceeb5df08946e249bf8ab82d41fab1ce" Jan 27 21:14:03 crc kubenswrapper[4793]: E0127 21:14:03.503029 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"498e3c7c25d770de2e8f6c8c8a4f9919ceeb5df08946e249bf8ab82d41fab1ce\": container with ID starting with 498e3c7c25d770de2e8f6c8c8a4f9919ceeb5df08946e249bf8ab82d41fab1ce not found: ID does not exist" containerID="498e3c7c25d770de2e8f6c8c8a4f9919ceeb5df08946e249bf8ab82d41fab1ce" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.503083 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"498e3c7c25d770de2e8f6c8c8a4f9919ceeb5df08946e249bf8ab82d41fab1ce"} err="failed to get container status \"498e3c7c25d770de2e8f6c8c8a4f9919ceeb5df08946e249bf8ab82d41fab1ce\": rpc error: code = NotFound desc = could not find container \"498e3c7c25d770de2e8f6c8c8a4f9919ceeb5df08946e249bf8ab82d41fab1ce\": container with ID starting with 498e3c7c25d770de2e8f6c8c8a4f9919ceeb5df08946e249bf8ab82d41fab1ce not found: ID does not exist" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.503111 4793 scope.go:117] "RemoveContainer" containerID="416c87d4163c1e8cbed066915c9e931000329d7dfd2583b6519f4d670fff5754" Jan 27 21:14:03 crc kubenswrapper[4793]: E0127 21:14:03.503451 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"416c87d4163c1e8cbed066915c9e931000329d7dfd2583b6519f4d670fff5754\": container with ID starting with 
416c87d4163c1e8cbed066915c9e931000329d7dfd2583b6519f4d670fff5754 not found: ID does not exist" containerID="416c87d4163c1e8cbed066915c9e931000329d7dfd2583b6519f4d670fff5754" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.503474 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"416c87d4163c1e8cbed066915c9e931000329d7dfd2583b6519f4d670fff5754"} err="failed to get container status \"416c87d4163c1e8cbed066915c9e931000329d7dfd2583b6519f4d670fff5754\": rpc error: code = NotFound desc = could not find container \"416c87d4163c1e8cbed066915c9e931000329d7dfd2583b6519f4d670fff5754\": container with ID starting with 416c87d4163c1e8cbed066915c9e931000329d7dfd2583b6519f4d670fff5754 not found: ID does not exist" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.701097 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jph8r"] Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.717411 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jph8r"] Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.804667 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55" Jan 27 21:14:03 crc kubenswrapper[4793]: I0127 21:14:03.818207 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1780b173-d7b6-4fd8-80ea-926b379c3b73" path="/var/lib/kubelet/pods/1780b173-d7b6-4fd8-80ea-926b379c3b73/volumes" Jan 27 21:14:04 crc kubenswrapper[4793]: I0127 21:14:04.217259 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wvx4k"] Jan 27 21:14:04 crc kubenswrapper[4793]: I0127 21:14:04.217500 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wvx4k" podUID="bd4d8da1-88ab-427f-bccd-6187c4792725" containerName="registry-server" containerID="cri-o://5500a03d244dc0d21a1d4a8d29deca1a8aaa139ea2d9ed238a02a86d3adba2ab" gracePeriod=2 Jan 27 21:14:04 crc kubenswrapper[4793]: I0127 21:14:04.364772 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f"} Jan 27 21:14:04 crc kubenswrapper[4793]: I0127 21:14:04.928249 4793 util.go:48] "No ready sandbox for pod can be found. 
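[The paired "ContainerStatus from runtime service failed" / "DeleteContainer returned error" lines above are benign: the container was already gone when the kubelet re-queried CRI-O, so the runtime answers with gRPC NotFound and cleanup simply continues. A sketch of distinguishing that case from a real failure via the gRPC status code follows (the helper name is hypothetical; status.Code and codes.NotFound are the standard grpc-go API).]

package crisketch

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Sketch: treat a CRI "NotFound" as already-deleted rather than a
// failure, which is why each error line above is followed by normal
// continuation instead of a retry.
func alreadyGone(err error) bool {
	return status.Code(err) == codes.NotFound
}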
Jan 27 21:14:04 crc kubenswrapper[4793]: I0127 21:14:04.928249 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wvx4k"
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.098866 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd4d8da1-88ab-427f-bccd-6187c4792725-utilities\") pod \"bd4d8da1-88ab-427f-bccd-6187c4792725\" (UID: \"bd4d8da1-88ab-427f-bccd-6187c4792725\") "
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.099236 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd4d8da1-88ab-427f-bccd-6187c4792725-catalog-content\") pod \"bd4d8da1-88ab-427f-bccd-6187c4792725\" (UID: \"bd4d8da1-88ab-427f-bccd-6187c4792725\") "
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.099276 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xdsc4\" (UniqueName: \"kubernetes.io/projected/bd4d8da1-88ab-427f-bccd-6187c4792725-kube-api-access-xdsc4\") pod \"bd4d8da1-88ab-427f-bccd-6187c4792725\" (UID: \"bd4d8da1-88ab-427f-bccd-6187c4792725\") "
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.099954 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd4d8da1-88ab-427f-bccd-6187c4792725-utilities" (OuterVolumeSpecName: "utilities") pod "bd4d8da1-88ab-427f-bccd-6187c4792725" (UID: "bd4d8da1-88ab-427f-bccd-6187c4792725"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.105582 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd4d8da1-88ab-427f-bccd-6187c4792725-kube-api-access-xdsc4" (OuterVolumeSpecName: "kube-api-access-xdsc4") pod "bd4d8da1-88ab-427f-bccd-6187c4792725" (UID: "bd4d8da1-88ab-427f-bccd-6187c4792725"). InnerVolumeSpecName "kube-api-access-xdsc4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.124302 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd4d8da1-88ab-427f-bccd-6187c4792725-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bd4d8da1-88ab-427f-bccd-6187c4792725" (UID: "bd4d8da1-88ab-427f-bccd-6187c4792725"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.202404 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd4d8da1-88ab-427f-bccd-6187c4792725-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.202448 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xdsc4\" (UniqueName: \"kubernetes.io/projected/bd4d8da1-88ab-427f-bccd-6187c4792725-kube-api-access-xdsc4\") on node \"crc\" DevicePath \"\""
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.202465 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd4d8da1-88ab-427f-bccd-6187c4792725-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.377878 4793 generic.go:334] "Generic (PLEG): container finished" podID="bd4d8da1-88ab-427f-bccd-6187c4792725" containerID="5500a03d244dc0d21a1d4a8d29deca1a8aaa139ea2d9ed238a02a86d3adba2ab" exitCode=0
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.377919 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wvx4k" event={"ID":"bd4d8da1-88ab-427f-bccd-6187c4792725","Type":"ContainerDied","Data":"5500a03d244dc0d21a1d4a8d29deca1a8aaa139ea2d9ed238a02a86d3adba2ab"}
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.377946 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wvx4k" event={"ID":"bd4d8da1-88ab-427f-bccd-6187c4792725","Type":"ContainerDied","Data":"ca57516e004614c79962fc4aa42ecf1e1d5d4810f185e066968330ab5e658a71"}
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.377962 4793 scope.go:117] "RemoveContainer" containerID="5500a03d244dc0d21a1d4a8d29deca1a8aaa139ea2d9ed238a02a86d3adba2ab"
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.378043 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wvx4k"
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.407158 4793 scope.go:117] "RemoveContainer" containerID="63fa0e40e9367a5d84433f26a401966b9355160e3788cdeb4287547db33d4e6c"
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.440183 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wvx4k"]
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.467695 4793 scope.go:117] "RemoveContainer" containerID="8e21f94bf793abc5bb56310c44d1a79c709061cc4bd3f72820bc41af26e34fda"
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.481866 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wvx4k"]
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.496249 4793 scope.go:117] "RemoveContainer" containerID="5500a03d244dc0d21a1d4a8d29deca1a8aaa139ea2d9ed238a02a86d3adba2ab"
Jan 27 21:14:05 crc kubenswrapper[4793]: E0127 21:14:05.496949 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5500a03d244dc0d21a1d4a8d29deca1a8aaa139ea2d9ed238a02a86d3adba2ab\": container with ID starting with 5500a03d244dc0d21a1d4a8d29deca1a8aaa139ea2d9ed238a02a86d3adba2ab not found: ID does not exist" containerID="5500a03d244dc0d21a1d4a8d29deca1a8aaa139ea2d9ed238a02a86d3adba2ab"
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.497001 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5500a03d244dc0d21a1d4a8d29deca1a8aaa139ea2d9ed238a02a86d3adba2ab"} err="failed to get container status \"5500a03d244dc0d21a1d4a8d29deca1a8aaa139ea2d9ed238a02a86d3adba2ab\": rpc error: code = NotFound desc = could not find container \"5500a03d244dc0d21a1d4a8d29deca1a8aaa139ea2d9ed238a02a86d3adba2ab\": container with ID starting with 5500a03d244dc0d21a1d4a8d29deca1a8aaa139ea2d9ed238a02a86d3adba2ab not found: ID does not exist"
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.497034 4793 scope.go:117] "RemoveContainer" containerID="63fa0e40e9367a5d84433f26a401966b9355160e3788cdeb4287547db33d4e6c"
Jan 27 21:14:05 crc kubenswrapper[4793]: E0127 21:14:05.500039 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63fa0e40e9367a5d84433f26a401966b9355160e3788cdeb4287547db33d4e6c\": container with ID starting with 63fa0e40e9367a5d84433f26a401966b9355160e3788cdeb4287547db33d4e6c not found: ID does not exist" containerID="63fa0e40e9367a5d84433f26a401966b9355160e3788cdeb4287547db33d4e6c"
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.500126 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63fa0e40e9367a5d84433f26a401966b9355160e3788cdeb4287547db33d4e6c"} err="failed to get container status \"63fa0e40e9367a5d84433f26a401966b9355160e3788cdeb4287547db33d4e6c\": rpc error: code = NotFound desc = could not find container \"63fa0e40e9367a5d84433f26a401966b9355160e3788cdeb4287547db33d4e6c\": container with ID starting with 63fa0e40e9367a5d84433f26a401966b9355160e3788cdeb4287547db33d4e6c not found: ID does not exist"
Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.500157 4793 scope.go:117] "RemoveContainer" containerID="8e21f94bf793abc5bb56310c44d1a79c709061cc4bd3f72820bc41af26e34fda"
failed" err="rpc error: code = NotFound desc = could not find container \"8e21f94bf793abc5bb56310c44d1a79c709061cc4bd3f72820bc41af26e34fda\": container with ID starting with 8e21f94bf793abc5bb56310c44d1a79c709061cc4bd3f72820bc41af26e34fda not found: ID does not exist" containerID="8e21f94bf793abc5bb56310c44d1a79c709061cc4bd3f72820bc41af26e34fda" Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.500692 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e21f94bf793abc5bb56310c44d1a79c709061cc4bd3f72820bc41af26e34fda"} err="failed to get container status \"8e21f94bf793abc5bb56310c44d1a79c709061cc4bd3f72820bc41af26e34fda\": rpc error: code = NotFound desc = could not find container \"8e21f94bf793abc5bb56310c44d1a79c709061cc4bd3f72820bc41af26e34fda\": container with ID starting with 8e21f94bf793abc5bb56310c44d1a79c709061cc4bd3f72820bc41af26e34fda not found: ID does not exist" Jan 27 21:14:05 crc kubenswrapper[4793]: I0127 21:14:05.817019 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd4d8da1-88ab-427f-bccd-6187c4792725" path="/var/lib/kubelet/pods/bd4d8da1-88ab-427f-bccd-6187c4792725/volumes" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.149537 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Jan 27 21:14:06 crc kubenswrapper[4793]: E0127 21:14:06.150407 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1780b173-d7b6-4fd8-80ea-926b379c3b73" containerName="extract-content" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.150588 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1780b173-d7b6-4fd8-80ea-926b379c3b73" containerName="extract-content" Jan 27 21:14:06 crc kubenswrapper[4793]: E0127 21:14:06.150620 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1780b173-d7b6-4fd8-80ea-926b379c3b73" containerName="registry-server" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.150630 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1780b173-d7b6-4fd8-80ea-926b379c3b73" containerName="registry-server" Jan 27 21:14:06 crc kubenswrapper[4793]: E0127 21:14:06.150651 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd4d8da1-88ab-427f-bccd-6187c4792725" containerName="extract-content" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.150658 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd4d8da1-88ab-427f-bccd-6187c4792725" containerName="extract-content" Jan 27 21:14:06 crc kubenswrapper[4793]: E0127 21:14:06.150671 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="133f9908-e135-4dd8-8213-37fde5ca3e2c" containerName="extract-utilities" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.150679 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="133f9908-e135-4dd8-8213-37fde5ca3e2c" containerName="extract-utilities" Jan 27 21:14:06 crc kubenswrapper[4793]: E0127 21:14:06.150703 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="133f9908-e135-4dd8-8213-37fde5ca3e2c" containerName="registry-server" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.150714 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="133f9908-e135-4dd8-8213-37fde5ca3e2c" containerName="registry-server" Jan 27 21:14:06 crc kubenswrapper[4793]: E0127 21:14:06.150729 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd4d8da1-88ab-427f-bccd-6187c4792725" containerName="registry-server" Jan 27 21:14:06 crc 
kubenswrapper[4793]: I0127 21:14:06.150737 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd4d8da1-88ab-427f-bccd-6187c4792725" containerName="registry-server" Jan 27 21:14:06 crc kubenswrapper[4793]: E0127 21:14:06.150752 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd4d8da1-88ab-427f-bccd-6187c4792725" containerName="extract-utilities" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.150759 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd4d8da1-88ab-427f-bccd-6187c4792725" containerName="extract-utilities" Jan 27 21:14:06 crc kubenswrapper[4793]: E0127 21:14:06.150769 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1780b173-d7b6-4fd8-80ea-926b379c3b73" containerName="extract-utilities" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.150776 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1780b173-d7b6-4fd8-80ea-926b379c3b73" containerName="extract-utilities" Jan 27 21:14:06 crc kubenswrapper[4793]: E0127 21:14:06.150785 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="133f9908-e135-4dd8-8213-37fde5ca3e2c" containerName="extract-content" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.150792 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="133f9908-e135-4dd8-8213-37fde5ca3e2c" containerName="extract-content" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.151047 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="1780b173-d7b6-4fd8-80ea-926b379c3b73" containerName="registry-server" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.151068 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd4d8da1-88ab-427f-bccd-6187c4792725" containerName="registry-server" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.151086 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="133f9908-e135-4dd8-8213-37fde5ca3e2c" containerName="registry-server" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.152472 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.161600 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.215733 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.312501 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-nfs-2-0"] Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.314221 4793 util.go:30] "No sandbox for pod can be found. 
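[The cpu_manager/state_mem/memory_manager lines above show the kubelet pruning stale per-container resource-manager state for the pods it just removed. The CPU manager persists this state as a JSON checkpoint, conventionally at /var/lib/kubelet/cpu_manager_state; a minimal, illustrative sketch of reading it follows, with the field set abbreviated to the commonly present keys (treat the struct as an assumption about the checkpoint layout, not the authoritative kubelet type).]

package cpustate

import (
	"encoding/json"
	"os"
)

// Sketch: the on-disk checkpoint behind the in-memory state that
// RemoveStaleState is pruning above. Entries (when present, e.g. with
// the static policy) map pod UID -> container name -> cpuset string.
type cpuManagerCheckpoint struct {
	PolicyName    string                       `json:"policyName"`
	DefaultCPUSet string                       `json:"defaultCpuSet"`
	Entries       map[string]map[string]string `json:"entries,omitempty"`
	Checksum      uint64                       `json:"checksum"`
}

func readCheckpoint(path string) (*cpuManagerCheckpoint, error) {
	raw, err := os.ReadFile(path) // e.g. /var/lib/kubelet/cpu_manager_state
	if err != nil {
		return nil, err
	}
	var cp cpuManagerCheckpoint
	if err := json.Unmarshal(raw, &cp); err != nil {
		return nil, err
	}
	return &cp, nil
}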
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.314221 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-nfs-2-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.316119 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-nfs-2-config-data"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.323802 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-2-0"]
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.328088 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.328151 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.328175 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c765d809-ba2f-47e7-a54c-e6140a738c8d-scripts\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.328216 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.328238 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c765d809-ba2f-47e7-a54c-e6140a738c8d-config-data-custom\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.328260 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-sys\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.328299 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-587hp\" (UniqueName: \"kubernetes.io/projected/c765d809-ba2f-47e7-a54c-e6140a738c8d-kube-api-access-587hp\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.328317 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-etc-nvme\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.328337 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-run\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.328353 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c765d809-ba2f-47e7-a54c-e6140a738c8d-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.328393 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-dev\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.328411 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c765d809-ba2f-47e7-a54c-e6140a738c8d-config-data\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.328462 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-lib-modules\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.328488 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.328504 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.361140 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-nfs-0"]
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.362968 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-nfs-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.365104 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-nfs-config-data"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.396099 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-0"]
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.431842 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-dev\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.431891 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c765d809-ba2f-47e7-a54c-e6140a738c8d-config-data\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.431952 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/768192cb-07c6-4ce7-b090-7d277a7c4d58-scripts\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.431980 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-etc-machine-id\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432018 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-sys\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432040 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-etc-iscsi\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432083 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-etc-nvme\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0"
Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432098 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-var-locks-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0"
\"dev\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-dev\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432128 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-lib-modules\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432177 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432195 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432214 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432236 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-lib-modules\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432276 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432299 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c765d809-ba2f-47e7-a54c-e6140a738c8d-scripts\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432321 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-var-locks-brick\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432361 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 
21:14:06.432384 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/768192cb-07c6-4ce7-b090-7d277a7c4d58-config-data\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432400 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c765d809-ba2f-47e7-a54c-e6140a738c8d-config-data-custom\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432423 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-sys\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432482 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jhmb\" (UniqueName: \"kubernetes.io/projected/768192cb-07c6-4ce7-b090-7d277a7c4d58-kube-api-access-8jhmb\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432523 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-run\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432630 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-587hp\" (UniqueName: \"kubernetes.io/projected/c765d809-ba2f-47e7-a54c-e6140a738c8d-kube-api-access-587hp\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432650 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-etc-nvme\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432674 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-run\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432815 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c765d809-ba2f-47e7-a54c-e6140a738c8d-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432833 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-var-lib-cinder\") pod 
\"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432851 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/768192cb-07c6-4ce7-b090-7d277a7c4d58-combined-ca-bundle\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.432891 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/768192cb-07c6-4ce7-b090-7d277a7c4d58-config-data-custom\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.433032 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-dev\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.433960 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.434092 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-lib-modules\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.434193 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.434200 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.434492 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-sys\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.435343 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.435696 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: 
\"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-run\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.435819 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-etc-nvme\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.435889 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/c765d809-ba2f-47e7-a54c-e6140a738c8d-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.441320 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c765d809-ba2f-47e7-a54c-e6140a738c8d-config-data-custom\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.448148 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c765d809-ba2f-47e7-a54c-e6140a738c8d-scripts\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.449241 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c765d809-ba2f-47e7-a54c-e6140a738c8d-config-data\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.457397 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c765d809-ba2f-47e7-a54c-e6140a738c8d-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.465039 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-587hp\" (UniqueName: \"kubernetes.io/projected/c765d809-ba2f-47e7-a54c-e6140a738c8d-kube-api-access-587hp\") pod \"cinder-backup-0\" (UID: \"c765d809-ba2f-47e7-a54c-e6140a738c8d\") " pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.534297 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/768192cb-07c6-4ce7-b090-7d277a7c4d58-config-data-custom\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.534582 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-var-locks-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.534626 4793 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-sys\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.534661 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/768192cb-07c6-4ce7-b090-7d277a7c4d58-scripts\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.534681 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-etc-machine-id\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.534699 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-sys\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.534728 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-etc-iscsi\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.534745 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-var-lib-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.534760 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-dev\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.534799 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-etc-iscsi\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.534826 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-etc-nvme\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.534842 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-var-locks-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: 
\"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.534865 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-dev\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.534885 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-config-data\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.534921 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-sys\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535063 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-etc-iscsi\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535091 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-etc-machine-id\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535101 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-dev\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535160 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-etc-nvme\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535342 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-lib-modules\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535403 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-combined-ca-bundle\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535475 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mp4f\" (UniqueName: 
\"kubernetes.io/projected/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-kube-api-access-2mp4f\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535521 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-etc-nvme\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535593 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-lib-modules\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535620 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-var-locks-brick\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535646 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-lib-modules\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535660 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-var-locks-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535664 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-etc-machine-id\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535760 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-scripts\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535808 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/768192cb-07c6-4ce7-b090-7d277a7c4d58-config-data\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535801 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-var-locks-brick\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " 
pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535865 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-run\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535894 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jhmb\" (UniqueName: \"kubernetes.io/projected/768192cb-07c6-4ce7-b090-7d277a7c4d58-kube-api-access-8jhmb\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535947 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-run\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.535986 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-var-lib-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.536009 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/768192cb-07c6-4ce7-b090-7d277a7c4d58-combined-ca-bundle\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.536025 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-var-locks-brick\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.536056 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-config-data-custom\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.536136 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-run\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.536188 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/768192cb-07c6-4ce7-b090-7d277a7c4d58-var-lib-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.539828 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/768192cb-07c6-4ce7-b090-7d277a7c4d58-config-data\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.540428 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/768192cb-07c6-4ce7-b090-7d277a7c4d58-combined-ca-bundle\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.540493 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/768192cb-07c6-4ce7-b090-7d277a7c4d58-config-data-custom\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.540520 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/768192cb-07c6-4ce7-b090-7d277a7c4d58-scripts\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.552751 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.559038 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jhmb\" (UniqueName: \"kubernetes.io/projected/768192cb-07c6-4ce7-b090-7d277a7c4d58-kube-api-access-8jhmb\") pod \"cinder-volume-nfs-2-0\" (UID: \"768192cb-07c6-4ce7-b090-7d277a7c4d58\") " pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.625976 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wbrzc"] Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.626207 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wbrzc" podUID="b96c45ee-df43-464e-98e6-021beeb75939" containerName="registry-server" containerID="cri-o://9e10f15a08bcb7d4ab18775f90949bb56c1d3fd3b6dad5197b2d992609e7d656" gracePeriod=2 Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.635614 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-volume-nfs-2-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638104 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-var-locks-brick\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638162 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-config-data-custom\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638196 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-var-locks-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638220 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-sys\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638257 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-var-lib-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638272 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-dev\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638298 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-etc-iscsi\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638330 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-config-data\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638369 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-combined-ca-bundle\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638390 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mp4f\" (UniqueName: 
\"kubernetes.io/projected/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-kube-api-access-2mp4f\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638411 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-etc-nvme\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638434 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-lib-modules\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638453 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-etc-machine-id\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638480 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-scripts\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638519 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-run\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638636 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-run\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.638707 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-var-locks-brick\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.639226 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-dev\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.639291 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-etc-nvme\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.640024 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" 
(UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-var-lib-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.640075 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-var-locks-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.640103 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-sys\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.640128 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-etc-iscsi\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.640351 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-etc-machine-id\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.640385 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-lib-modules\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.645204 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-config-data-custom\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.645637 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-config-data\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.648154 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-scripts\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.652848 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-combined-ca-bundle\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.655719 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-2mp4f\" (UniqueName: \"kubernetes.io/projected/37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0-kube-api-access-2mp4f\") pod \"cinder-volume-nfs-0\" (UID: \"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0\") " pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:06 crc kubenswrapper[4793]: I0127 21:14:06.682771 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-nfs-0" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.107999 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wbrzc" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.202648 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.262360 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-stpcc\" (UniqueName: \"kubernetes.io/projected/b96c45ee-df43-464e-98e6-021beeb75939-kube-api-access-stpcc\") pod \"b96c45ee-df43-464e-98e6-021beeb75939\" (UID: \"b96c45ee-df43-464e-98e6-021beeb75939\") " Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.262459 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b96c45ee-df43-464e-98e6-021beeb75939-catalog-content\") pod \"b96c45ee-df43-464e-98e6-021beeb75939\" (UID: \"b96c45ee-df43-464e-98e6-021beeb75939\") " Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.262513 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b96c45ee-df43-464e-98e6-021beeb75939-utilities\") pod \"b96c45ee-df43-464e-98e6-021beeb75939\" (UID: \"b96c45ee-df43-464e-98e6-021beeb75939\") " Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.263757 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b96c45ee-df43-464e-98e6-021beeb75939-utilities" (OuterVolumeSpecName: "utilities") pod "b96c45ee-df43-464e-98e6-021beeb75939" (UID: "b96c45ee-df43-464e-98e6-021beeb75939"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.268154 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b96c45ee-df43-464e-98e6-021beeb75939-kube-api-access-stpcc" (OuterVolumeSpecName: "kube-api-access-stpcc") pod "b96c45ee-df43-464e-98e6-021beeb75939" (UID: "b96c45ee-df43-464e-98e6-021beeb75939"). InnerVolumeSpecName "kube-api-access-stpcc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.365738 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b96c45ee-df43-464e-98e6-021beeb75939-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.365772 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-stpcc\" (UniqueName: \"kubernetes.io/projected/b96c45ee-df43-464e-98e6-021beeb75939-kube-api-access-stpcc\") on node \"crc\" DevicePath \"\"" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.407728 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b96c45ee-df43-464e-98e6-021beeb75939-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b96c45ee-df43-464e-98e6-021beeb75939" (UID: "b96c45ee-df43-464e-98e6-021beeb75939"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.417781 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" exitCode=1 Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.417837 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f"} Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.417873 4793 scope.go:117] "RemoveContainer" containerID="e1e8fc1ec34a60aa1605dda5ef0f6a0d6276a1003dcfd6951422c669891edf55" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.418632 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:14:07 crc kubenswrapper[4793]: E0127 21:14:07.418892 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.420791 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"c765d809-ba2f-47e7-a54c-e6140a738c8d","Type":"ContainerStarted","Data":"beacd04973316c9115fbeb38507484e82871f98e26d4b70645fb4992d44ce203"} Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.422103 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-2-0"] Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.423413 4793 generic.go:334] "Generic (PLEG): container finished" podID="b96c45ee-df43-464e-98e6-021beeb75939" containerID="9e10f15a08bcb7d4ab18775f90949bb56c1d3fd3b6dad5197b2d992609e7d656" exitCode=0 Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.423453 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbrzc" event={"ID":"b96c45ee-df43-464e-98e6-021beeb75939","Type":"ContainerDied","Data":"9e10f15a08bcb7d4ab18775f90949bb56c1d3fd3b6dad5197b2d992609e7d656"} Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.423469 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wbrzc" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.423478 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbrzc" event={"ID":"b96c45ee-df43-464e-98e6-021beeb75939","Type":"ContainerDied","Data":"7de039506bae5f19b2ef6f8f675c7dd515a23154e3fd1f1cbf0c600fd905428d"} Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.468987 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b96c45ee-df43-464e-98e6-021beeb75939-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.508065 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-0"] Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.599305 4793 scope.go:117] "RemoveContainer" containerID="9e10f15a08bcb7d4ab18775f90949bb56c1d3fd3b6dad5197b2d992609e7d656" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.705373 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wbrzc"] Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.714405 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wbrzc"] Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.772847 4793 scope.go:117] "RemoveContainer" containerID="e12c8334a9dddb331419061b67366239549f897d5f1c2b42deda7ba3d9dd5462" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.816782 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b96c45ee-df43-464e-98e6-021beeb75939" path="/var/lib/kubelet/pods/b96c45ee-df43-464e-98e6-021beeb75939/volumes" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.851112 4793 scope.go:117] "RemoveContainer" containerID="f66e9a3b1bb273cd57052c298cda79e0988543aa955f917545c768949757795c" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.878589 4793 scope.go:117] "RemoveContainer" containerID="9e10f15a08bcb7d4ab18775f90949bb56c1d3fd3b6dad5197b2d992609e7d656" Jan 27 21:14:07 crc kubenswrapper[4793]: E0127 21:14:07.878930 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e10f15a08bcb7d4ab18775f90949bb56c1d3fd3b6dad5197b2d992609e7d656\": container with ID starting with 9e10f15a08bcb7d4ab18775f90949bb56c1d3fd3b6dad5197b2d992609e7d656 not found: ID does not exist" containerID="9e10f15a08bcb7d4ab18775f90949bb56c1d3fd3b6dad5197b2d992609e7d656" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.878969 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e10f15a08bcb7d4ab18775f90949bb56c1d3fd3b6dad5197b2d992609e7d656"} err="failed to get container status \"9e10f15a08bcb7d4ab18775f90949bb56c1d3fd3b6dad5197b2d992609e7d656\": rpc error: code = NotFound desc = could not find container \"9e10f15a08bcb7d4ab18775f90949bb56c1d3fd3b6dad5197b2d992609e7d656\": container with ID starting with 9e10f15a08bcb7d4ab18775f90949bb56c1d3fd3b6dad5197b2d992609e7d656 not found: ID does not exist" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.879000 4793 scope.go:117] "RemoveContainer" containerID="e12c8334a9dddb331419061b67366239549f897d5f1c2b42deda7ba3d9dd5462" Jan 27 21:14:07 crc kubenswrapper[4793]: E0127 21:14:07.880759 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"e12c8334a9dddb331419061b67366239549f897d5f1c2b42deda7ba3d9dd5462\": container with ID starting with e12c8334a9dddb331419061b67366239549f897d5f1c2b42deda7ba3d9dd5462 not found: ID does not exist" containerID="e12c8334a9dddb331419061b67366239549f897d5f1c2b42deda7ba3d9dd5462" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.880810 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e12c8334a9dddb331419061b67366239549f897d5f1c2b42deda7ba3d9dd5462"} err="failed to get container status \"e12c8334a9dddb331419061b67366239549f897d5f1c2b42deda7ba3d9dd5462\": rpc error: code = NotFound desc = could not find container \"e12c8334a9dddb331419061b67366239549f897d5f1c2b42deda7ba3d9dd5462\": container with ID starting with e12c8334a9dddb331419061b67366239549f897d5f1c2b42deda7ba3d9dd5462 not found: ID does not exist" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.880847 4793 scope.go:117] "RemoveContainer" containerID="f66e9a3b1bb273cd57052c298cda79e0988543aa955f917545c768949757795c" Jan 27 21:14:07 crc kubenswrapper[4793]: E0127 21:14:07.881292 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f66e9a3b1bb273cd57052c298cda79e0988543aa955f917545c768949757795c\": container with ID starting with f66e9a3b1bb273cd57052c298cda79e0988543aa955f917545c768949757795c not found: ID does not exist" containerID="f66e9a3b1bb273cd57052c298cda79e0988543aa955f917545c768949757795c" Jan 27 21:14:07 crc kubenswrapper[4793]: I0127 21:14:07.881338 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f66e9a3b1bb273cd57052c298cda79e0988543aa955f917545c768949757795c"} err="failed to get container status \"f66e9a3b1bb273cd57052c298cda79e0988543aa955f917545c768949757795c\": rpc error: code = NotFound desc = could not find container \"f66e9a3b1bb273cd57052c298cda79e0988543aa955f917545c768949757795c\": container with ID starting with f66e9a3b1bb273cd57052c298cda79e0988543aa955f917545c768949757795c not found: ID does not exist" Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 21:14:08.243685 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 21:14:08.244314 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 21:14:08.244328 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 21:14:08.244337 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 21:14:08.438534 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"c765d809-ba2f-47e7-a54c-e6140a738c8d","Type":"ContainerStarted","Data":"40d5c8a2e1bbd250d31a8792d1f93706f8e68b7d50442acd7276707642b6c82b"} Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 21:14:08.438609 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"c765d809-ba2f-47e7-a54c-e6140a738c8d","Type":"ContainerStarted","Data":"dc6531eeeace89ba00a2a18257138f7d0087db7bc5a7aa490cde31112008cf83"} Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 21:14:08.460831 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" 
event={"ID":"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0","Type":"ContainerStarted","Data":"d556eda9ae99b64f77fb95f5a11a4fc8e4c2807a162819bea3467ef4cb975658"} Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 21:14:08.461032 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" event={"ID":"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0","Type":"ContainerStarted","Data":"1e1d86d4f522399b8ecabe270f381879349ccfbc0bfce600a3560de26bd6f631"} Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 21:14:08.461234 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" event={"ID":"37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0","Type":"ContainerStarted","Data":"d03c22c292e8457b64594c22a426a843ba3795fbb381a0a9eeea40f7d7ee7571"} Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 21:14:08.476489 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=2.108079295 podStartE2EDuration="2.47646646s" podCreationTimestamp="2026-01-27 21:14:06 +0000 UTC" firstStartedPulling="2026-01-27 21:14:07.204724819 +0000 UTC m=+4272.594977975" lastFinishedPulling="2026-01-27 21:14:07.573111984 +0000 UTC m=+4272.963365140" observedRunningTime="2026-01-27 21:14:08.465096521 +0000 UTC m=+4273.855349697" watchObservedRunningTime="2026-01-27 21:14:08.47646646 +0000 UTC m=+4273.866719616" Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 21:14:08.479809 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"768192cb-07c6-4ce7-b090-7d277a7c4d58","Type":"ContainerStarted","Data":"0b294b3a4206648a05d2ae324f818074f542608d24a3e6bc141ad3af4be80975"} Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 21:14:08.479840 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"768192cb-07c6-4ce7-b090-7d277a7c4d58","Type":"ContainerStarted","Data":"b7022f2374bf3fc9af0a0f53bcd7c0ab0e761b9d4fca37de5af8d131ffe2ff05"} Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 21:14:08.479849 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"768192cb-07c6-4ce7-b090-7d277a7c4d58","Type":"ContainerStarted","Data":"dc3cc5882f9635709c52d255bfc4a208ed229bffa3b480dad0aad434926089c4"} Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 21:14:08.483151 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:14:08 crc kubenswrapper[4793]: E0127 21:14:08.483377 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 21:14:08.491607 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-nfs-0" podStartSLOduration=2.209499818 podStartE2EDuration="2.491589692s" podCreationTimestamp="2026-01-27 21:14:06 +0000 UTC" firstStartedPulling="2026-01-27 21:14:07.570619124 +0000 UTC m=+4272.960872280" lastFinishedPulling="2026-01-27 21:14:07.852708998 +0000 UTC m=+4273.242962154" observedRunningTime="2026-01-27 21:14:08.490836494 +0000 UTC m=+4273.881089650" watchObservedRunningTime="2026-01-27 21:14:08.491589692 +0000 UTC m=+4273.881842848" Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 
Jan 27 21:14:08 crc kubenswrapper[4793]: I0127 21:14:08.527617 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-nfs-2-0" podStartSLOduration=2.244743995 podStartE2EDuration="2.527595508s" podCreationTimestamp="2026-01-27 21:14:06 +0000 UTC" firstStartedPulling="2026-01-27 21:14:07.569822614 +0000 UTC m=+4272.960075770" lastFinishedPulling="2026-01-27 21:14:07.852674127 +0000 UTC m=+4273.242927283" observedRunningTime="2026-01-27 21:14:08.515942781 +0000 UTC m=+4273.906195937" watchObservedRunningTime="2026-01-27 21:14:08.527595508 +0000 UTC m=+4273.917848664"
Jan 27 21:14:11 crc kubenswrapper[4793]: I0127 21:14:11.554268 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0"
Jan 27 21:14:11 crc kubenswrapper[4793]: I0127 21:14:11.636410 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-nfs-2-0"
Jan 27 21:14:11 crc kubenswrapper[4793]: I0127 21:14:11.683855 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-nfs-0"
Jan 27 21:14:16 crc kubenswrapper[4793]: I0127 21:14:16.914658 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-nfs-2-0"
Jan 27 21:14:16 crc kubenswrapper[4793]: I0127 21:14:16.916846 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-nfs-0"
Jan 27 21:14:16 crc kubenswrapper[4793]: I0127 21:14:16.917772 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0"
Jan 27 21:14:20 crc kubenswrapper[4793]: I0127 21:14:20.804101 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f"
Jan 27 21:14:20 crc kubenswrapper[4793]: E0127 21:14:20.805126 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:14:22 crc kubenswrapper[4793]: I0127 21:14:22.753836 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 21:14:22 crc kubenswrapper[4793]: I0127 21:14:22.754127 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 21:14:32 crc kubenswrapper[4793]: I0127 21:14:32.804772 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f"
Jan 27 21:14:32 crc kubenswrapper[4793]: E0127 21:14:32.805812 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
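Two threads run through the entries above. First, the cinder pods' startup probes report unhealthy at 21:14:11 and started at 21:14:16; while a startup probe has not yet succeeded, kubelet suppresses liveness and readiness checks, so the early unhealthy results never restart the freshly created containers. Second, watcher-applier-0 is pinned at the CrashLoopBackOff ceiling: each sync attempt (21:14:20, 21:14:32, and more below) is refused with "back-off 5m0s", kubelet's maximum restart delay. A sketch of the back-off ladder that message implies; the 10-second base and five-minute cap mirror kubelet's documented behaviour rather than its source:

// The restart back-off ladder behind "back-off 5m0s ... CrashLoopBackOff":
// kubelet doubles the delay after each failed restart and caps it at 5m.
package main

import (
	"fmt"
	"time"
)

func backoff(failures int) time.Duration {
	d := 10 * time.Second
	for i := 0; i < failures; i++ {
		d *= 2
		if d >= 5*time.Minute {
			return 5 * time.Minute
		}
	}
	return d
}

func main() {
	for i := 0; i <= 6; i++ {
		fmt.Printf("failure %d -> wait %v\n", i, backoff(i)) // 10s, 20s, 40s, ..., 5m0s
	}
}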
21:14:47 crc kubenswrapper[4793]: I0127 21:14:47.804286 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:14:47 crc kubenswrapper[4793]: E0127 21:14:47.807822 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:14:52 crc kubenswrapper[4793]: I0127 21:14:52.754070 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:14:52 crc kubenswrapper[4793]: I0127 21:14:52.754703 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:14:58 crc kubenswrapper[4793]: I0127 21:14:58.803746 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:14:58 crc kubenswrapper[4793]: E0127 21:14:58.804637 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.233940 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd"] Jan 27 21:15:00 crc kubenswrapper[4793]: E0127 21:15:00.234476 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b96c45ee-df43-464e-98e6-021beeb75939" containerName="extract-content" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.234494 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b96c45ee-df43-464e-98e6-021beeb75939" containerName="extract-content" Jan 27 21:15:00 crc kubenswrapper[4793]: E0127 21:15:00.234536 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b96c45ee-df43-464e-98e6-021beeb75939" containerName="registry-server" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.234618 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b96c45ee-df43-464e-98e6-021beeb75939" containerName="registry-server" Jan 27 21:15:00 crc kubenswrapper[4793]: E0127 21:15:00.234642 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b96c45ee-df43-464e-98e6-021beeb75939" containerName="extract-utilities" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.234652 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b96c45ee-df43-464e-98e6-021beeb75939" containerName="extract-utilities" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.234933 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="b96c45ee-df43-464e-98e6-021beeb75939" containerName="registry-server" Jan 27 21:15:00 crc kubenswrapper[4793]: 
I0127 21:15:00.235801 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.244253 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.244501 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.247271 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd"] Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.363676 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-config-volume\") pod \"collect-profiles-29492475-vftgd\" (UID: \"2b61115a-e157-4d07-bcfe-2440b5a4ff2c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.364037 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-secret-volume\") pod \"collect-profiles-29492475-vftgd\" (UID: \"2b61115a-e157-4d07-bcfe-2440b5a4ff2c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.364497 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g49w2\" (UniqueName: \"kubernetes.io/projected/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-kube-api-access-g49w2\") pod \"collect-profiles-29492475-vftgd\" (UID: \"2b61115a-e157-4d07-bcfe-2440b5a4ff2c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.466699 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g49w2\" (UniqueName: \"kubernetes.io/projected/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-kube-api-access-g49w2\") pod \"collect-profiles-29492475-vftgd\" (UID: \"2b61115a-e157-4d07-bcfe-2440b5a4ff2c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.466788 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-config-volume\") pod \"collect-profiles-29492475-vftgd\" (UID: \"2b61115a-e157-4d07-bcfe-2440b5a4ff2c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.466866 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-secret-volume\") pod \"collect-profiles-29492475-vftgd\" (UID: \"2b61115a-e157-4d07-bcfe-2440b5a4ff2c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.467909 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-config-volume\") pod \"collect-profiles-29492475-vftgd\" (UID: \"2b61115a-e157-4d07-bcfe-2440b5a4ff2c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.487034 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-secret-volume\") pod \"collect-profiles-29492475-vftgd\" (UID: \"2b61115a-e157-4d07-bcfe-2440b5a4ff2c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.500046 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g49w2\" (UniqueName: \"kubernetes.io/projected/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-kube-api-access-g49w2\") pod \"collect-profiles-29492475-vftgd\" (UID: \"2b61115a-e157-4d07-bcfe-2440b5a4ff2c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd" Jan 27 21:15:00 crc kubenswrapper[4793]: I0127 21:15:00.573523 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd" Jan 27 21:15:01 crc kubenswrapper[4793]: I0127 21:15:01.081842 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd"] Jan 27 21:15:01 crc kubenswrapper[4793]: I0127 21:15:01.112698 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd" event={"ID":"2b61115a-e157-4d07-bcfe-2440b5a4ff2c","Type":"ContainerStarted","Data":"75523ae283cf2d4de0a6d8cb805b3c72ab18e05071450bbe63a0e68943d6c9a0"} Jan 27 21:15:02 crc kubenswrapper[4793]: I0127 21:15:02.124830 4793 generic.go:334] "Generic (PLEG): container finished" podID="2b61115a-e157-4d07-bcfe-2440b5a4ff2c" containerID="8efd72baeacbc9cbc3af34ef491da75a938404e78e7bc0db78259f0b48bc16a7" exitCode=0 Jan 27 21:15:02 crc kubenswrapper[4793]: I0127 21:15:02.124923 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd" event={"ID":"2b61115a-e157-4d07-bcfe-2440b5a4ff2c","Type":"ContainerDied","Data":"8efd72baeacbc9cbc3af34ef491da75a938404e78e7bc0db78259f0b48bc16a7"} Jan 27 21:15:03 crc kubenswrapper[4793]: I0127 21:15:03.512051 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd" Jan 27 21:15:03 crc kubenswrapper[4793]: I0127 21:15:03.599891 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g49w2\" (UniqueName: \"kubernetes.io/projected/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-kube-api-access-g49w2\") pod \"2b61115a-e157-4d07-bcfe-2440b5a4ff2c\" (UID: \"2b61115a-e157-4d07-bcfe-2440b5a4ff2c\") " Jan 27 21:15:03 crc kubenswrapper[4793]: I0127 21:15:03.600397 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-config-volume\") pod \"2b61115a-e157-4d07-bcfe-2440b5a4ff2c\" (UID: \"2b61115a-e157-4d07-bcfe-2440b5a4ff2c\") " Jan 27 21:15:03 crc kubenswrapper[4793]: I0127 21:15:03.600844 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-config-volume" (OuterVolumeSpecName: "config-volume") pod "2b61115a-e157-4d07-bcfe-2440b5a4ff2c" (UID: "2b61115a-e157-4d07-bcfe-2440b5a4ff2c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 21:15:03 crc kubenswrapper[4793]: I0127 21:15:03.600796 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-secret-volume\") pod \"2b61115a-e157-4d07-bcfe-2440b5a4ff2c\" (UID: \"2b61115a-e157-4d07-bcfe-2440b5a4ff2c\") " Jan 27 21:15:03 crc kubenswrapper[4793]: I0127 21:15:03.601929 4793 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 21:15:03 crc kubenswrapper[4793]: I0127 21:15:03.606031 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-kube-api-access-g49w2" (OuterVolumeSpecName: "kube-api-access-g49w2") pod "2b61115a-e157-4d07-bcfe-2440b5a4ff2c" (UID: "2b61115a-e157-4d07-bcfe-2440b5a4ff2c"). InnerVolumeSpecName "kube-api-access-g49w2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:15:03 crc kubenswrapper[4793]: I0127 21:15:03.606464 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2b61115a-e157-4d07-bcfe-2440b5a4ff2c" (UID: "2b61115a-e157-4d07-bcfe-2440b5a4ff2c"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:15:03 crc kubenswrapper[4793]: I0127 21:15:03.704479 4793 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 21:15:03 crc kubenswrapper[4793]: I0127 21:15:03.704524 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g49w2\" (UniqueName: \"kubernetes.io/projected/2b61115a-e157-4d07-bcfe-2440b5a4ff2c-kube-api-access-g49w2\") on node \"crc\" DevicePath \"\"" Jan 27 21:15:04 crc kubenswrapper[4793]: I0127 21:15:04.153461 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd" event={"ID":"2b61115a-e157-4d07-bcfe-2440b5a4ff2c","Type":"ContainerDied","Data":"75523ae283cf2d4de0a6d8cb805b3c72ab18e05071450bbe63a0e68943d6c9a0"} Jan 27 21:15:04 crc kubenswrapper[4793]: I0127 21:15:04.153783 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75523ae283cf2d4de0a6d8cb805b3c72ab18e05071450bbe63a0e68943d6c9a0" Jan 27 21:15:04 crc kubenswrapper[4793]: I0127 21:15:04.153645 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd" Jan 27 21:15:04 crc kubenswrapper[4793]: I0127 21:15:04.606727 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7"] Jan 27 21:15:04 crc kubenswrapper[4793]: I0127 21:15:04.616579 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492430-v56k7"] Jan 27 21:15:05 crc kubenswrapper[4793]: I0127 21:15:05.825665 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61e6d950-0948-4021-8d5b-a8578c9a2326" path="/var/lib/kubelet/pods/61e6d950-0948-4021-8d5b-a8578c9a2326/volumes" Jan 27 21:15:11 crc kubenswrapper[4793]: I0127 21:15:11.804123 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:15:11 crc kubenswrapper[4793]: E0127 21:15:11.805044 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:15:22 crc kubenswrapper[4793]: I0127 21:15:22.753451 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:15:22 crc kubenswrapper[4793]: I0127 21:15:22.754063 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:15:22 crc kubenswrapper[4793]: I0127 21:15:22.754131 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 21:15:22 crc kubenswrapper[4793]: I0127 21:15:22.755317 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"182fdabdfe5ed76bc8cf20051a045117330b8f24b6a69106baed4d343976b76e"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 21:15:22 crc kubenswrapper[4793]: I0127 21:15:22.755424 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://182fdabdfe5ed76bc8cf20051a045117330b8f24b6a69106baed4d343976b76e" gracePeriod=600 Jan 27 21:15:23 crc kubenswrapper[4793]: I0127 21:15:23.387682 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="182fdabdfe5ed76bc8cf20051a045117330b8f24b6a69106baed4d343976b76e" exitCode=0 Jan 27 21:15:23 crc kubenswrapper[4793]: I0127 21:15:23.387739 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"182fdabdfe5ed76bc8cf20051a045117330b8f24b6a69106baed4d343976b76e"} Jan 27 21:15:23 crc kubenswrapper[4793]: I0127 21:15:23.387820 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb"} Jan 27 21:15:23 crc kubenswrapper[4793]: I0127 21:15:23.387841 4793 scope.go:117] "RemoveContainer" containerID="8e640ba53e2fd0e559d6316eae107165c34aa1a750588bf7c981435eed2ff245" Jan 27 21:15:26 crc kubenswrapper[4793]: I0127 21:15:26.803983 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:15:26 crc kubenswrapper[4793]: E0127 21:15:26.805064 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:15:36 crc kubenswrapper[4793]: I0127 21:15:36.131350 4793 scope.go:117] "RemoveContainer" containerID="e48ded758c71fd0e185eb1d22ae5c95425af9d7111522306ecb097d7bab7889f" Jan 27 21:15:40 crc kubenswrapper[4793]: I0127 21:15:40.803428 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:15:40 crc kubenswrapper[4793]: E0127 21:15:40.804252 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:15:51 crc kubenswrapper[4793]: I0127 21:15:51.803061 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 
21:15:51 crc kubenswrapper[4793]: E0127 21:15:51.803923 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:16:06 crc kubenswrapper[4793]: I0127 21:16:06.804614 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:16:06 crc kubenswrapper[4793]: E0127 21:16:06.805645 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:16:17 crc kubenswrapper[4793]: I0127 21:16:17.803419 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:16:17 crc kubenswrapper[4793]: E0127 21:16:17.804147 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:16:30 crc kubenswrapper[4793]: I0127 21:16:30.804354 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:16:30 crc kubenswrapper[4793]: E0127 21:16:30.806032 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:16:43 crc kubenswrapper[4793]: I0127 21:16:43.803695 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:16:43 crc kubenswrapper[4793]: E0127 21:16:43.804346 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:16:58 crc kubenswrapper[4793]: I0127 21:16:58.803348 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:16:58 crc kubenswrapper[4793]: E0127 21:16:58.804428 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:17:07 crc kubenswrapper[4793]: I0127 21:17:07.267884 4793 trace.go:236] Trace[453164960]: "Calculate volume metrics of registry-storage for pod 
openshift-image-registry/image-registry-66df7c8f76-nqzlb" (27-Jan-2026 21:16:59.855) (total time: 7412ms): Jan 27 21:17:07 crc kubenswrapper[4793]: Trace[453164960]: [7.41253663s] [7.41253663s] END Jan 27 21:17:10 crc kubenswrapper[4793]: I0127 21:17:10.803626 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:17:10 crc kubenswrapper[4793]: E0127 21:17:10.804398 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:17:22 crc kubenswrapper[4793]: I0127 21:17:22.807195 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:17:22 crc kubenswrapper[4793]: E0127 21:17:22.809562 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:17:36 crc kubenswrapper[4793]: I0127 21:17:36.803754 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:17:36 crc kubenswrapper[4793]: E0127 21:17:36.804669 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:17:50 crc kubenswrapper[4793]: I0127 21:17:50.804339 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:17:50 crc kubenswrapper[4793]: E0127 21:17:50.805518 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:17:52 crc kubenswrapper[4793]: I0127 21:17:52.753439 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:17:52 crc kubenswrapper[4793]: I0127 21:17:52.753519 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:18:05 crc kubenswrapper[4793]: I0127 21:18:05.818864 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:18:05 crc kubenswrapper[4793]: E0127 21:18:05.819800 
4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:18:18 crc kubenswrapper[4793]: I0127 21:18:18.803245 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:18:18 crc kubenswrapper[4793]: E0127 21:18:18.804083 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:18:22 crc kubenswrapper[4793]: I0127 21:18:22.753174 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:18:22 crc kubenswrapper[4793]: I0127 21:18:22.755215 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:18:33 crc kubenswrapper[4793]: I0127 21:18:33.804281 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:18:33 crc kubenswrapper[4793]: E0127 21:18:33.805133 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:18:45 crc kubenswrapper[4793]: I0127 21:18:45.809979 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:18:45 crc kubenswrapper[4793]: E0127 21:18:45.811568 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:18:48 crc kubenswrapper[4793]: I0127 21:18:48.241091 4793 trace.go:236] Trace[248727524]: "Calculate volume metrics of registry-storage for pod openshift-image-registry/image-registry-66df7c8f76-nqzlb" (27-Jan-2026 21:18:46.883) (total time: 1357ms): Jan 27 21:18:48 crc kubenswrapper[4793]: Trace[248727524]: [1.357683589s] [1.357683589s] END Jan 27 21:18:52 crc kubenswrapper[4793]: I0127 21:18:52.753068 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" start-of-body= Jan 27 21:18:52 crc kubenswrapper[4793]: I0127 21:18:52.753525 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:18:52 crc kubenswrapper[4793]: I0127 21:18:52.753623 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 21:18:52 crc kubenswrapper[4793]: I0127 21:18:52.754105 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 21:18:52 crc kubenswrapper[4793]: I0127 21:18:52.754164 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" gracePeriod=600 Jan 27 21:18:52 crc kubenswrapper[4793]: E0127 21:18:52.887066 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:18:53 crc kubenswrapper[4793]: I0127 21:18:53.163187 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" exitCode=0 Jan 27 21:18:53 crc kubenswrapper[4793]: I0127 21:18:53.163269 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb"} Jan 27 21:18:53 crc kubenswrapper[4793]: I0127 21:18:53.163344 4793 scope.go:117] "RemoveContainer" containerID="182fdabdfe5ed76bc8cf20051a045117330b8f24b6a69106baed4d343976b76e" Jan 27 21:18:53 crc kubenswrapper[4793]: I0127 21:18:53.165015 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:18:53 crc kubenswrapper[4793]: E0127 21:18:53.165363 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:18:59 crc kubenswrapper[4793]: I0127 21:18:59.811241 4793 scope.go:117] "RemoveContainer" 
containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:18:59 crc kubenswrapper[4793]: E0127 21:18:59.812198 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:19:06 crc kubenswrapper[4793]: I0127 21:19:06.803386 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:19:06 crc kubenswrapper[4793]: E0127 21:19:06.804388 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:19:14 crc kubenswrapper[4793]: I0127 21:19:14.804278 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:19:15 crc kubenswrapper[4793]: I0127 21:19:15.460816 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e"} Jan 27 21:19:18 crc kubenswrapper[4793]: I0127 21:19:18.243109 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:19:18 crc kubenswrapper[4793]: I0127 21:19:18.243889 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 21:19:18 crc kubenswrapper[4793]: E0127 21:19:18.243999 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e is running failed: container process not found" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 27 21:19:18 crc kubenswrapper[4793]: E0127 21:19:18.244968 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e is running failed: container process not found" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 27 21:19:18 crc kubenswrapper[4793]: E0127 21:19:18.245563 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e is running failed: container process not found" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 27 21:19:18 crc kubenswrapper[4793]: E0127 21:19:18.245626 4793 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: 
checking if PID of 52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e is running failed: container process not found" probeType="Startup" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerName="watcher-applier" Jan 27 21:19:18 crc kubenswrapper[4793]: I0127 21:19:18.909263 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" exitCode=1 Jan 27 21:19:18 crc kubenswrapper[4793]: I0127 21:19:18.909308 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e"} Jan 27 21:19:18 crc kubenswrapper[4793]: I0127 21:19:18.909346 4793 scope.go:117] "RemoveContainer" containerID="0cd3dcd7e92127bea7a33e4028e308cdb2e0c3e9f221edc553a8ca2e0d78756f" Jan 27 21:19:18 crc kubenswrapper[4793]: I0127 21:19:18.910129 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:19:18 crc kubenswrapper[4793]: E0127 21:19:18.910439 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:19:21 crc kubenswrapper[4793]: I0127 21:19:21.803842 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:19:21 crc kubenswrapper[4793]: E0127 21:19:21.804881 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:19:28 crc kubenswrapper[4793]: I0127 21:19:28.242408 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:19:28 crc kubenswrapper[4793]: I0127 21:19:28.242924 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:19:28 crc kubenswrapper[4793]: I0127 21:19:28.243726 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:19:28 crc kubenswrapper[4793]: E0127 21:19:28.243989 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:19:32 crc kubenswrapper[4793]: I0127 21:19:32.803054 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:19:32 crc kubenswrapper[4793]: E0127 21:19:32.803825 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:19:41 crc kubenswrapper[4793]: I0127 21:19:41.908667 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:19:41 crc kubenswrapper[4793]: E0127 21:19:41.913040 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:19:45 crc kubenswrapper[4793]: I0127 21:19:45.815436 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:19:45 crc kubenswrapper[4793]: E0127 21:19:45.816407 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:19:56 crc kubenswrapper[4793]: I0127 21:19:56.804018 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:19:56 crc kubenswrapper[4793]: I0127 21:19:56.804728 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:19:56 crc kubenswrapper[4793]: E0127 21:19:56.804955 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:19:56 crc kubenswrapper[4793]: E0127 21:19:56.805057 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:20:07 crc kubenswrapper[4793]: I0127 21:20:07.803364 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:20:07 crc kubenswrapper[4793]: E0127 21:20:07.804161 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:20:07 crc kubenswrapper[4793]: I0127 21:20:07.804307 4793 scope.go:117] 
"RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:20:07 crc kubenswrapper[4793]: E0127 21:20:07.804517 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:20:19 crc kubenswrapper[4793]: I0127 21:20:19.803953 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:20:19 crc kubenswrapper[4793]: E0127 21:20:19.804803 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:20:22 crc kubenswrapper[4793]: I0127 21:20:22.804721 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:20:22 crc kubenswrapper[4793]: E0127 21:20:22.805385 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:20:32 crc kubenswrapper[4793]: I0127 21:20:32.804888 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:20:32 crc kubenswrapper[4793]: E0127 21:20:32.807791 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:20:36 crc kubenswrapper[4793]: I0127 21:20:36.803251 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:20:36 crc kubenswrapper[4793]: E0127 21:20:36.804028 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:20:43 crc kubenswrapper[4793]: I0127 21:20:43.804605 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:20:43 crc kubenswrapper[4793]: E0127 21:20:43.805699 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:20:51 crc kubenswrapper[4793]: I0127 21:20:51.810148 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:20:51 crc kubenswrapper[4793]: E0127 21:20:51.811291 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:20:57 crc kubenswrapper[4793]: I0127 21:20:57.803930 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:20:57 crc kubenswrapper[4793]: E0127 21:20:57.805037 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:21:06 crc kubenswrapper[4793]: I0127 21:21:06.803781 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:21:06 crc kubenswrapper[4793]: E0127 21:21:06.804506 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:21:12 crc kubenswrapper[4793]: I0127 21:21:12.803747 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:21:12 crc kubenswrapper[4793]: E0127 21:21:12.804484 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:21:21 crc kubenswrapper[4793]: I0127 21:21:21.803831 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:21:21 crc kubenswrapper[4793]: E0127 21:21:21.806174 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:21:25 crc kubenswrapper[4793]: I0127 21:21:25.815793 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" 
Jan 27 21:21:25 crc kubenswrapper[4793]: E0127 21:21:25.816796 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:21:35 crc kubenswrapper[4793]: I0127 21:21:35.822860 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:21:35 crc kubenswrapper[4793]: E0127 21:21:35.824098 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:21:36 crc kubenswrapper[4793]: I0127 21:21:36.803617 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:21:36 crc kubenswrapper[4793]: E0127 21:21:36.804318 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:21:46 crc kubenswrapper[4793]: I0127 21:21:46.803410 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:21:46 crc kubenswrapper[4793]: E0127 21:21:46.804572 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:21:51 crc kubenswrapper[4793]: I0127 21:21:51.803152 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:21:51 crc kubenswrapper[4793]: E0127 21:21:51.803759 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:22:01 crc kubenswrapper[4793]: I0127 21:22:01.804702 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:22:01 crc kubenswrapper[4793]: E0127 21:22:01.805800 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" 
pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:22:06 crc kubenswrapper[4793]: I0127 21:22:06.804198 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:22:06 crc kubenswrapper[4793]: E0127 21:22:06.804972 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:22:13 crc kubenswrapper[4793]: I0127 21:22:13.805710 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:22:13 crc kubenswrapper[4793]: E0127 21:22:13.807106 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:22:18 crc kubenswrapper[4793]: I0127 21:22:18.804318 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:22:18 crc kubenswrapper[4793]: E0127 21:22:18.805104 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:22:27 crc kubenswrapper[4793]: I0127 21:22:27.817787 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:22:27 crc kubenswrapper[4793]: E0127 21:22:27.818850 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:22:32 crc kubenswrapper[4793]: I0127 21:22:32.804367 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:22:32 crc kubenswrapper[4793]: E0127 21:22:32.805584 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:22:38 crc kubenswrapper[4793]: I0127 21:22:38.803475 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:22:38 crc kubenswrapper[4793]: E0127 21:22:38.804121 4793 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:22:44 crc kubenswrapper[4793]: I0127 21:22:44.804184 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:22:44 crc kubenswrapper[4793]: E0127 21:22:44.804938 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:22:52 crc kubenswrapper[4793]: I0127 21:22:52.803380 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:22:52 crc kubenswrapper[4793]: E0127 21:22:52.804287 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:22:57 crc kubenswrapper[4793]: I0127 21:22:57.804350 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:22:57 crc kubenswrapper[4793]: E0127 21:22:57.805479 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:23:04 crc kubenswrapper[4793]: I0127 21:23:04.803908 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:23:04 crc kubenswrapper[4793]: E0127 21:23:04.804581 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:23:09 crc kubenswrapper[4793]: I0127 21:23:09.804937 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:23:09 crc kubenswrapper[4793]: E0127 21:23:09.805563 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:23:16 crc 
kubenswrapper[4793]: I0127 21:23:16.802938 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:23:16 crc kubenswrapper[4793]: E0127 21:23:16.805114 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:23:20 crc kubenswrapper[4793]: I0127 21:23:20.804123 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:23:20 crc kubenswrapper[4793]: E0127 21:23:20.805079 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:23:29 crc kubenswrapper[4793]: I0127 21:23:29.803668 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:23:29 crc kubenswrapper[4793]: E0127 21:23:29.805427 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:23:34 crc kubenswrapper[4793]: I0127 21:23:34.803227 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:23:34 crc kubenswrapper[4793]: E0127 21:23:34.804337 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:23:42 crc kubenswrapper[4793]: I0127 21:23:42.803481 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:23:42 crc kubenswrapper[4793]: E0127 21:23:42.804146 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:23:45 crc kubenswrapper[4793]: I0127 21:23:45.841064 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:23:45 crc kubenswrapper[4793]: E0127 21:23:45.842965 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:23:52 crc kubenswrapper[4793]: I0127 21:23:52.062976 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-spbsl"] Jan 27 21:23:52 crc kubenswrapper[4793]: E0127 21:23:52.064188 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b61115a-e157-4d07-bcfe-2440b5a4ff2c" containerName="collect-profiles" Jan 27 21:23:52 crc kubenswrapper[4793]: I0127 21:23:52.064208 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b61115a-e157-4d07-bcfe-2440b5a4ff2c" containerName="collect-profiles" Jan 27 21:23:52 crc kubenswrapper[4793]: I0127 21:23:52.064501 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b61115a-e157-4d07-bcfe-2440b5a4ff2c" containerName="collect-profiles" Jan 27 21:23:52 crc kubenswrapper[4793]: I0127 21:23:52.066945 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 27 21:23:52 crc kubenswrapper[4793]: I0127 21:23:52.075229 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-spbsl"] Jan 27 21:23:52 crc kubenswrapper[4793]: I0127 21:23:52.147807 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-utilities\") pod \"redhat-marketplace-spbsl\" (UID: \"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e\") " pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 27 21:23:52 crc kubenswrapper[4793]: I0127 21:23:52.147862 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-catalog-content\") pod \"redhat-marketplace-spbsl\" (UID: \"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e\") " pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 27 21:23:52 crc kubenswrapper[4793]: I0127 21:23:52.148274 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6wkk\" (UniqueName: \"kubernetes.io/projected/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-kube-api-access-k6wkk\") pod \"redhat-marketplace-spbsl\" (UID: \"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e\") " pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 27 21:23:52 crc kubenswrapper[4793]: I0127 21:23:52.250833 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-utilities\") pod \"redhat-marketplace-spbsl\" (UID: \"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e\") " pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 27 21:23:52 crc kubenswrapper[4793]: I0127 21:23:52.250886 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-catalog-content\") pod \"redhat-marketplace-spbsl\" (UID: \"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e\") " pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 27 21:23:52 crc kubenswrapper[4793]: I0127 21:23:52.251075 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6wkk\" 
(UniqueName: \"kubernetes.io/projected/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-kube-api-access-k6wkk\") pod \"redhat-marketplace-spbsl\" (UID: \"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e\") " pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 27 21:23:52 crc kubenswrapper[4793]: I0127 21:23:52.251488 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-utilities\") pod \"redhat-marketplace-spbsl\" (UID: \"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e\") " pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 27 21:23:52 crc kubenswrapper[4793]: I0127 21:23:52.251608 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-catalog-content\") pod \"redhat-marketplace-spbsl\" (UID: \"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e\") " pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 27 21:23:52 crc kubenswrapper[4793]: I0127 21:23:52.272496 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6wkk\" (UniqueName: \"kubernetes.io/projected/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-kube-api-access-k6wkk\") pod \"redhat-marketplace-spbsl\" (UID: \"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e\") " pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 27 21:23:52 crc kubenswrapper[4793]: I0127 21:23:52.405061 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 27 21:23:52 crc kubenswrapper[4793]: I0127 21:23:52.923744 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-spbsl"] Jan 27 21:23:52 crc kubenswrapper[4793]: W0127 21:23:52.926580 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfa251902_f4bb_48d4_b1ba_cb14c6e5a19e.slice/crio-50ef74b7d6a5e4445c4a258836411d281979080bfed9f98baac4b1cf21e00eff WatchSource:0}: Error finding container 50ef74b7d6a5e4445c4a258836411d281979080bfed9f98baac4b1cf21e00eff: Status 404 returned error can't find the container with id 50ef74b7d6a5e4445c4a258836411d281979080bfed9f98baac4b1cf21e00eff Jan 27 21:23:53 crc kubenswrapper[4793]: I0127 21:23:53.596409 4793 generic.go:334] "Generic (PLEG): container finished" podID="fa251902-f4bb-48d4-b1ba-cb14c6e5a19e" containerID="2d7114e9932dc3f36e5e66f5002f588078a2aebb67622cdad1534d36532cb8ce" exitCode=0 Jan 27 21:23:53 crc kubenswrapper[4793]: I0127 21:23:53.596491 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-spbsl" event={"ID":"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e","Type":"ContainerDied","Data":"2d7114e9932dc3f36e5e66f5002f588078a2aebb67622cdad1534d36532cb8ce"} Jan 27 21:23:53 crc kubenswrapper[4793]: I0127 21:23:53.596743 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-spbsl" event={"ID":"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e","Type":"ContainerStarted","Data":"50ef74b7d6a5e4445c4a258836411d281979080bfed9f98baac4b1cf21e00eff"} Jan 27 21:23:53 crc kubenswrapper[4793]: I0127 21:23:53.598907 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 21:23:54 crc kubenswrapper[4793]: I0127 21:23:54.616064 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-spbsl" 
event={"ID":"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e","Type":"ContainerStarted","Data":"9970dc4f78c2d2ba4a97756179dc64435951717ad08568b6c2665e9239dfe5b5"} Jan 27 21:23:55 crc kubenswrapper[4793]: I0127 21:23:55.654086 4793 generic.go:334] "Generic (PLEG): container finished" podID="fa251902-f4bb-48d4-b1ba-cb14c6e5a19e" containerID="9970dc4f78c2d2ba4a97756179dc64435951717ad08568b6c2665e9239dfe5b5" exitCode=0 Jan 27 21:23:55 crc kubenswrapper[4793]: I0127 21:23:55.654169 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-spbsl" event={"ID":"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e","Type":"ContainerDied","Data":"9970dc4f78c2d2ba4a97756179dc64435951717ad08568b6c2665e9239dfe5b5"} Jan 27 21:23:56 crc kubenswrapper[4793]: I0127 21:23:56.668357 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-spbsl" event={"ID":"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e","Type":"ContainerStarted","Data":"c710cbd0c86fc871bfbc65c4ff12064e33cd481360c7ac01c88c14ed63064520"} Jan 27 21:23:56 crc kubenswrapper[4793]: I0127 21:23:56.693611 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-spbsl" podStartSLOduration=3.212884289 podStartE2EDuration="5.693571915s" podCreationTimestamp="2026-01-27 21:23:51 +0000 UTC" firstStartedPulling="2026-01-27 21:23:53.598605168 +0000 UTC m=+4858.988858324" lastFinishedPulling="2026-01-27 21:23:56.079292794 +0000 UTC m=+4861.469545950" observedRunningTime="2026-01-27 21:23:56.689806963 +0000 UTC m=+4862.080060169" watchObservedRunningTime="2026-01-27 21:23:56.693571915 +0000 UTC m=+4862.083825101" Jan 27 21:23:56 crc kubenswrapper[4793]: I0127 21:23:56.803907 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:23:56 crc kubenswrapper[4793]: E0127 21:23:56.804226 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:23:58 crc kubenswrapper[4793]: I0127 21:23:58.803310 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:23:59 crc kubenswrapper[4793]: I0127 21:23:59.714539 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"805a666f98506e4c93d143d9f1a985ffcc479cdb8fb96f76713774c093745637"} Jan 27 21:24:02 crc kubenswrapper[4793]: I0127 21:24:02.405371 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 27 21:24:02 crc kubenswrapper[4793]: I0127 21:24:02.405943 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 27 21:24:02 crc kubenswrapper[4793]: I0127 21:24:02.472403 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 27 21:24:02 crc kubenswrapper[4793]: I0127 21:24:02.798295 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 
27 21:24:02 crc kubenswrapper[4793]: I0127 21:24:02.870748 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-spbsl"] Jan 27 21:24:04 crc kubenswrapper[4793]: I0127 21:24:04.760808 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-spbsl" podUID="fa251902-f4bb-48d4-b1ba-cb14c6e5a19e" containerName="registry-server" containerID="cri-o://c710cbd0c86fc871bfbc65c4ff12064e33cd481360c7ac01c88c14ed63064520" gracePeriod=2 Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.134180 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hrszk"] Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.140873 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.165176 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hrszk"] Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.295606 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fc9hl\" (UniqueName: \"kubernetes.io/projected/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-kube-api-access-fc9hl\") pod \"redhat-operators-hrszk\" (UID: \"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b\") " pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.296038 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-utilities\") pod \"redhat-operators-hrszk\" (UID: \"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b\") " pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.296079 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-catalog-content\") pod \"redhat-operators-hrszk\" (UID: \"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b\") " pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.398618 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-utilities\") pod \"redhat-operators-hrszk\" (UID: \"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b\") " pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.398676 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-catalog-content\") pod \"redhat-operators-hrszk\" (UID: \"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b\") " pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.398830 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fc9hl\" (UniqueName: \"kubernetes.io/projected/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-kube-api-access-fc9hl\") pod \"redhat-operators-hrszk\" (UID: \"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b\") " pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.399245 4793 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-catalog-content\") pod \"redhat-operators-hrszk\" (UID: \"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b\") " pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.399266 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-utilities\") pod \"redhat-operators-hrszk\" (UID: \"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b\") " pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.424522 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fc9hl\" (UniqueName: \"kubernetes.io/projected/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-kube-api-access-fc9hl\") pod \"redhat-operators-hrszk\" (UID: \"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b\") " pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.469395 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.770393 4793 generic.go:334] "Generic (PLEG): container finished" podID="fa251902-f4bb-48d4-b1ba-cb14c6e5a19e" containerID="c710cbd0c86fc871bfbc65c4ff12064e33cd481360c7ac01c88c14ed63064520" exitCode=0 Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.770442 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-spbsl" event={"ID":"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e","Type":"ContainerDied","Data":"c710cbd0c86fc871bfbc65c4ff12064e33cd481360c7ac01c88c14ed63064520"} Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.770474 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-spbsl" event={"ID":"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e","Type":"ContainerDied","Data":"50ef74b7d6a5e4445c4a258836411d281979080bfed9f98baac4b1cf21e00eff"} Jan 27 21:24:05 crc kubenswrapper[4793]: I0127 21:24:05.770521 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="50ef74b7d6a5e4445c4a258836411d281979080bfed9f98baac4b1cf21e00eff" Jan 27 21:24:06 crc kubenswrapper[4793]: I0127 21:24:06.344000 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 27 21:24:06 crc kubenswrapper[4793]: I0127 21:24:06.539627 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6wkk\" (UniqueName: \"kubernetes.io/projected/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-kube-api-access-k6wkk\") pod \"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e\" (UID: \"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e\") " Jan 27 21:24:06 crc kubenswrapper[4793]: I0127 21:24:06.539939 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-catalog-content\") pod \"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e\" (UID: \"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e\") " Jan 27 21:24:06 crc kubenswrapper[4793]: I0127 21:24:06.540105 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-utilities\") pod \"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e\" (UID: \"fa251902-f4bb-48d4-b1ba-cb14c6e5a19e\") " Jan 27 21:24:06 crc kubenswrapper[4793]: I0127 21:24:06.540706 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-utilities" (OuterVolumeSpecName: "utilities") pod "fa251902-f4bb-48d4-b1ba-cb14c6e5a19e" (UID: "fa251902-f4bb-48d4-b1ba-cb14c6e5a19e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:24:06 crc kubenswrapper[4793]: I0127 21:24:06.542238 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 21:24:06 crc kubenswrapper[4793]: I0127 21:24:06.544583 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-kube-api-access-k6wkk" (OuterVolumeSpecName: "kube-api-access-k6wkk") pod "fa251902-f4bb-48d4-b1ba-cb14c6e5a19e" (UID: "fa251902-f4bb-48d4-b1ba-cb14c6e5a19e"). InnerVolumeSpecName "kube-api-access-k6wkk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:24:06 crc kubenswrapper[4793]: I0127 21:24:06.571308 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fa251902-f4bb-48d4-b1ba-cb14c6e5a19e" (UID: "fa251902-f4bb-48d4-b1ba-cb14c6e5a19e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:24:06 crc kubenswrapper[4793]: I0127 21:24:06.597218 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hrszk"] Jan 27 21:24:06 crc kubenswrapper[4793]: I0127 21:24:06.644342 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6wkk\" (UniqueName: \"kubernetes.io/projected/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-kube-api-access-k6wkk\") on node \"crc\" DevicePath \"\"" Jan 27 21:24:06 crc kubenswrapper[4793]: I0127 21:24:06.644371 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 21:24:06 crc kubenswrapper[4793]: I0127 21:24:06.780473 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hrszk" event={"ID":"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b","Type":"ContainerStarted","Data":"0220bbad87bdbb9afd06cad5e7c74583aacc4771fcf3a3bd05fc5bdd0e0b1a6a"} Jan 27 21:24:06 crc kubenswrapper[4793]: I0127 21:24:06.780507 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-spbsl" Jan 27 21:24:06 crc kubenswrapper[4793]: I0127 21:24:06.823415 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-spbsl"] Jan 27 21:24:06 crc kubenswrapper[4793]: I0127 21:24:06.836308 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-spbsl"] Jan 27 21:24:07 crc kubenswrapper[4793]: I0127 21:24:07.792073 4793 generic.go:334] "Generic (PLEG): container finished" podID="24b1ff32-7d3f-40d1-aaf8-6bab4010c25b" containerID="86013d362466b71874b54e3b5ba737a4770db0b35004c5abde0821d9305b99e2" exitCode=0 Jan 27 21:24:07 crc kubenswrapper[4793]: I0127 21:24:07.792209 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hrszk" event={"ID":"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b","Type":"ContainerDied","Data":"86013d362466b71874b54e3b5ba737a4770db0b35004c5abde0821d9305b99e2"} Jan 27 21:24:07 crc kubenswrapper[4793]: I0127 21:24:07.803429 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:24:07 crc kubenswrapper[4793]: E0127 21:24:07.804613 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:24:07 crc kubenswrapper[4793]: I0127 21:24:07.821238 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa251902-f4bb-48d4-b1ba-cb14c6e5a19e" path="/var/lib/kubelet/pods/fa251902-f4bb-48d4-b1ba-cb14c6e5a19e/volumes" Jan 27 21:24:08 crc kubenswrapper[4793]: I0127 21:24:08.803991 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hrszk" event={"ID":"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b","Type":"ContainerStarted","Data":"486adc04b78bb78743145b16157acf26119e480fbf5d44036f9d6f1fdcdfab5f"} Jan 27 21:24:11 crc kubenswrapper[4793]: I0127 21:24:11.840087 4793 generic.go:334] "Generic (PLEG): container finished" podID="24b1ff32-7d3f-40d1-aaf8-6bab4010c25b" 
containerID="486adc04b78bb78743145b16157acf26119e480fbf5d44036f9d6f1fdcdfab5f" exitCode=0 Jan 27 21:24:11 crc kubenswrapper[4793]: I0127 21:24:11.840187 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hrszk" event={"ID":"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b","Type":"ContainerDied","Data":"486adc04b78bb78743145b16157acf26119e480fbf5d44036f9d6f1fdcdfab5f"} Jan 27 21:24:12 crc kubenswrapper[4793]: I0127 21:24:12.855592 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hrszk" event={"ID":"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b","Type":"ContainerStarted","Data":"9f04b79657d798ba4ea390634577729d04cdbfc1523622a8c79bd74ce755f6dc"} Jan 27 21:24:12 crc kubenswrapper[4793]: I0127 21:24:12.889140 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hrszk" podStartSLOduration=3.417426986 podStartE2EDuration="7.889107225s" podCreationTimestamp="2026-01-27 21:24:05 +0000 UTC" firstStartedPulling="2026-01-27 21:24:07.794066652 +0000 UTC m=+4873.184319838" lastFinishedPulling="2026-01-27 21:24:12.265746911 +0000 UTC m=+4877.656000077" observedRunningTime="2026-01-27 21:24:12.876611098 +0000 UTC m=+4878.266864294" watchObservedRunningTime="2026-01-27 21:24:12.889107225 +0000 UTC m=+4878.279360421" Jan 27 21:24:15 crc kubenswrapper[4793]: I0127 21:24:15.470774 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:15 crc kubenswrapper[4793]: I0127 21:24:15.471175 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:15 crc kubenswrapper[4793]: I0127 21:24:15.833540 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-24fbh"] Jan 27 21:24:15 crc kubenswrapper[4793]: E0127 21:24:15.843598 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa251902-f4bb-48d4-b1ba-cb14c6e5a19e" containerName="extract-content" Jan 27 21:24:15 crc kubenswrapper[4793]: I0127 21:24:15.843659 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa251902-f4bb-48d4-b1ba-cb14c6e5a19e" containerName="extract-content" Jan 27 21:24:15 crc kubenswrapper[4793]: E0127 21:24:15.843728 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa251902-f4bb-48d4-b1ba-cb14c6e5a19e" containerName="extract-utilities" Jan 27 21:24:15 crc kubenswrapper[4793]: I0127 21:24:15.843741 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa251902-f4bb-48d4-b1ba-cb14c6e5a19e" containerName="extract-utilities" Jan 27 21:24:15 crc kubenswrapper[4793]: E0127 21:24:15.843768 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa251902-f4bb-48d4-b1ba-cb14c6e5a19e" containerName="registry-server" Jan 27 21:24:15 crc kubenswrapper[4793]: I0127 21:24:15.843801 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa251902-f4bb-48d4-b1ba-cb14c6e5a19e" containerName="registry-server" Jan 27 21:24:15 crc kubenswrapper[4793]: I0127 21:24:15.845063 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa251902-f4bb-48d4-b1ba-cb14c6e5a19e" containerName="registry-server" Jan 27 21:24:15 crc kubenswrapper[4793]: I0127 21:24:15.854489 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-24fbh" Jan 27 21:24:15 crc kubenswrapper[4793]: I0127 21:24:15.866315 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-24fbh"] Jan 27 21:24:15 crc kubenswrapper[4793]: I0127 21:24:15.917360 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-utilities\") pod \"community-operators-24fbh\" (UID: \"87ad6ca8-f3ba-4b41-a571-e01443fdebf0\") " pod="openshift-marketplace/community-operators-24fbh" Jan 27 21:24:15 crc kubenswrapper[4793]: I0127 21:24:15.917411 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-catalog-content\") pod \"community-operators-24fbh\" (UID: \"87ad6ca8-f3ba-4b41-a571-e01443fdebf0\") " pod="openshift-marketplace/community-operators-24fbh" Jan 27 21:24:15 crc kubenswrapper[4793]: I0127 21:24:15.917554 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rng2w\" (UniqueName: \"kubernetes.io/projected/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-kube-api-access-rng2w\") pod \"community-operators-24fbh\" (UID: \"87ad6ca8-f3ba-4b41-a571-e01443fdebf0\") " pod="openshift-marketplace/community-operators-24fbh" Jan 27 21:24:16 crc kubenswrapper[4793]: I0127 21:24:16.019947 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-utilities\") pod \"community-operators-24fbh\" (UID: \"87ad6ca8-f3ba-4b41-a571-e01443fdebf0\") " pod="openshift-marketplace/community-operators-24fbh" Jan 27 21:24:16 crc kubenswrapper[4793]: I0127 21:24:16.020005 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-catalog-content\") pod \"community-operators-24fbh\" (UID: \"87ad6ca8-f3ba-4b41-a571-e01443fdebf0\") " pod="openshift-marketplace/community-operators-24fbh" Jan 27 21:24:16 crc kubenswrapper[4793]: I0127 21:24:16.020158 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rng2w\" (UniqueName: \"kubernetes.io/projected/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-kube-api-access-rng2w\") pod \"community-operators-24fbh\" (UID: \"87ad6ca8-f3ba-4b41-a571-e01443fdebf0\") " pod="openshift-marketplace/community-operators-24fbh" Jan 27 21:24:16 crc kubenswrapper[4793]: I0127 21:24:16.021008 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-utilities\") pod \"community-operators-24fbh\" (UID: \"87ad6ca8-f3ba-4b41-a571-e01443fdebf0\") " pod="openshift-marketplace/community-operators-24fbh" Jan 27 21:24:16 crc kubenswrapper[4793]: I0127 21:24:16.021310 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-catalog-content\") pod \"community-operators-24fbh\" (UID: \"87ad6ca8-f3ba-4b41-a571-e01443fdebf0\") " pod="openshift-marketplace/community-operators-24fbh" Jan 27 21:24:16 crc kubenswrapper[4793]: I0127 21:24:16.050483 4793 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rng2w\" (UniqueName: \"kubernetes.io/projected/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-kube-api-access-rng2w\") pod \"community-operators-24fbh\" (UID: \"87ad6ca8-f3ba-4b41-a571-e01443fdebf0\") " pod="openshift-marketplace/community-operators-24fbh" Jan 27 21:24:16 crc kubenswrapper[4793]: I0127 21:24:16.183497 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-24fbh" Jan 27 21:24:16 crc kubenswrapper[4793]: I0127 21:24:16.539667 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hrszk" podUID="24b1ff32-7d3f-40d1-aaf8-6bab4010c25b" containerName="registry-server" probeResult="failure" output=< Jan 27 21:24:16 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s Jan 27 21:24:16 crc kubenswrapper[4793]: > Jan 27 21:24:16 crc kubenswrapper[4793]: I0127 21:24:16.680667 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-24fbh"] Jan 27 21:24:16 crc kubenswrapper[4793]: I0127 21:24:16.907376 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24fbh" event={"ID":"87ad6ca8-f3ba-4b41-a571-e01443fdebf0","Type":"ContainerStarted","Data":"5b8dae830185114917053d61df24fef5e0e52df136ffdce57fad23c8c17489af"} Jan 27 21:24:16 crc kubenswrapper[4793]: I0127 21:24:16.907423 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24fbh" event={"ID":"87ad6ca8-f3ba-4b41-a571-e01443fdebf0","Type":"ContainerStarted","Data":"7021d6e01760b6353b395fa06ed5e276b51945892dabcf614f183b45be20198a"} Jan 27 21:24:17 crc kubenswrapper[4793]: I0127 21:24:17.918832 4793 generic.go:334] "Generic (PLEG): container finished" podID="87ad6ca8-f3ba-4b41-a571-e01443fdebf0" containerID="5b8dae830185114917053d61df24fef5e0e52df136ffdce57fad23c8c17489af" exitCode=0 Jan 27 21:24:17 crc kubenswrapper[4793]: I0127 21:24:17.918939 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24fbh" event={"ID":"87ad6ca8-f3ba-4b41-a571-e01443fdebf0","Type":"ContainerDied","Data":"5b8dae830185114917053d61df24fef5e0e52df136ffdce57fad23c8c17489af"} Jan 27 21:24:19 crc kubenswrapper[4793]: I0127 21:24:19.939266 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24fbh" event={"ID":"87ad6ca8-f3ba-4b41-a571-e01443fdebf0","Type":"ContainerStarted","Data":"75e33cb61c337d5c98065f945e1b3172d8f157e823a755449ce3c33a983104c7"} Jan 27 21:24:20 crc kubenswrapper[4793]: I0127 21:24:20.803963 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:24:20 crc kubenswrapper[4793]: I0127 21:24:20.952712 4793 generic.go:334] "Generic (PLEG): container finished" podID="87ad6ca8-f3ba-4b41-a571-e01443fdebf0" containerID="75e33cb61c337d5c98065f945e1b3172d8f157e823a755449ce3c33a983104c7" exitCode=0 Jan 27 21:24:20 crc kubenswrapper[4793]: I0127 21:24:20.952778 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24fbh" event={"ID":"87ad6ca8-f3ba-4b41-a571-e01443fdebf0","Type":"ContainerDied","Data":"75e33cb61c337d5c98065f945e1b3172d8f157e823a755449ce3c33a983104c7"} Jan 27 21:24:21 crc kubenswrapper[4793]: I0127 21:24:21.966722 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" 
event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"} Jan 27 21:24:21 crc kubenswrapper[4793]: I0127 21:24:21.969609 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24fbh" event={"ID":"87ad6ca8-f3ba-4b41-a571-e01443fdebf0","Type":"ContainerStarted","Data":"46f2a36790b1031edc668b70f635d30f510a463f6762c21ba5c114a001452f86"} Jan 27 21:24:22 crc kubenswrapper[4793]: I0127 21:24:22.017074 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-24fbh" podStartSLOduration=3.463123198 podStartE2EDuration="7.017049924s" podCreationTimestamp="2026-01-27 21:24:15 +0000 UTC" firstStartedPulling="2026-01-27 21:24:17.920984007 +0000 UTC m=+4883.311237163" lastFinishedPulling="2026-01-27 21:24:21.474910733 +0000 UTC m=+4886.865163889" observedRunningTime="2026-01-27 21:24:22.013887406 +0000 UTC m=+4887.404140562" watchObservedRunningTime="2026-01-27 21:24:22.017049924 +0000 UTC m=+4887.407303090" Jan 27 21:24:23 crc kubenswrapper[4793]: I0127 21:24:23.242687 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 21:24:23 crc kubenswrapper[4793]: I0127 21:24:23.988637 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588" exitCode=1 Jan 27 21:24:23 crc kubenswrapper[4793]: I0127 21:24:23.988746 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"} Jan 27 21:24:23 crc kubenswrapper[4793]: I0127 21:24:23.989018 4793 scope.go:117] "RemoveContainer" containerID="52ca00c79ed960b6326e8ed566960da5264aa45bf4c8bedf0eaf62134d09b32e" Jan 27 21:24:23 crc kubenswrapper[4793]: I0127 21:24:23.990291 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588" Jan 27 21:24:23 crc kubenswrapper[4793]: E0127 21:24:23.991275 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:24:25 crc kubenswrapper[4793]: I0127 21:24:25.524508 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:25 crc kubenswrapper[4793]: I0127 21:24:25.575441 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:25 crc kubenswrapper[4793]: I0127 21:24:25.765227 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hrszk"] Jan 27 21:24:26 crc kubenswrapper[4793]: I0127 21:24:26.192471 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-24fbh" Jan 27 21:24:26 crc kubenswrapper[4793]: I0127 21:24:26.193252 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-24fbh" Jan 27 21:24:27 crc 
kubenswrapper[4793]: I0127 21:24:27.104640 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hrszk" podUID="24b1ff32-7d3f-40d1-aaf8-6bab4010c25b" containerName="registry-server" containerID="cri-o://9f04b79657d798ba4ea390634577729d04cdbfc1523622a8c79bd74ce755f6dc" gracePeriod=2 Jan 27 21:24:27 crc kubenswrapper[4793]: I0127 21:24:27.246513 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-24fbh" podUID="87ad6ca8-f3ba-4b41-a571-e01443fdebf0" containerName="registry-server" probeResult="failure" output=< Jan 27 21:24:27 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s Jan 27 21:24:27 crc kubenswrapper[4793]: > Jan 27 21:24:27 crc kubenswrapper[4793]: I0127 21:24:27.672992 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:27 crc kubenswrapper[4793]: I0127 21:24:27.727786 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-catalog-content\") pod \"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b\" (UID: \"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b\") " Jan 27 21:24:27 crc kubenswrapper[4793]: I0127 21:24:27.728033 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-utilities\") pod \"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b\" (UID: \"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b\") " Jan 27 21:24:27 crc kubenswrapper[4793]: I0127 21:24:27.728076 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fc9hl\" (UniqueName: \"kubernetes.io/projected/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-kube-api-access-fc9hl\") pod \"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b\" (UID: \"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b\") " Jan 27 21:24:27 crc kubenswrapper[4793]: I0127 21:24:27.730154 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-utilities" (OuterVolumeSpecName: "utilities") pod "24b1ff32-7d3f-40d1-aaf8-6bab4010c25b" (UID: "24b1ff32-7d3f-40d1-aaf8-6bab4010c25b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:24:27 crc kubenswrapper[4793]: I0127 21:24:27.739320 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-kube-api-access-fc9hl" (OuterVolumeSpecName: "kube-api-access-fc9hl") pod "24b1ff32-7d3f-40d1-aaf8-6bab4010c25b" (UID: "24b1ff32-7d3f-40d1-aaf8-6bab4010c25b"). InnerVolumeSpecName "kube-api-access-fc9hl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:24:27 crc kubenswrapper[4793]: I0127 21:24:27.831266 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 21:24:27 crc kubenswrapper[4793]: I0127 21:24:27.831310 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fc9hl\" (UniqueName: \"kubernetes.io/projected/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-kube-api-access-fc9hl\") on node \"crc\" DevicePath \"\"" Jan 27 21:24:27 crc kubenswrapper[4793]: I0127 21:24:27.866315 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "24b1ff32-7d3f-40d1-aaf8-6bab4010c25b" (UID: "24b1ff32-7d3f-40d1-aaf8-6bab4010c25b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:24:27 crc kubenswrapper[4793]: I0127 21:24:27.932950 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.121111 4793 generic.go:334] "Generic (PLEG): container finished" podID="24b1ff32-7d3f-40d1-aaf8-6bab4010c25b" containerID="9f04b79657d798ba4ea390634577729d04cdbfc1523622a8c79bd74ce755f6dc" exitCode=0 Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.121172 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hrszk" event={"ID":"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b","Type":"ContainerDied","Data":"9f04b79657d798ba4ea390634577729d04cdbfc1523622a8c79bd74ce755f6dc"} Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.121206 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hrszk" event={"ID":"24b1ff32-7d3f-40d1-aaf8-6bab4010c25b","Type":"ContainerDied","Data":"0220bbad87bdbb9afd06cad5e7c74583aacc4771fcf3a3bd05fc5bdd0e0b1a6a"} Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.121247 4793 scope.go:117] "RemoveContainer" containerID="9f04b79657d798ba4ea390634577729d04cdbfc1523622a8c79bd74ce755f6dc" Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.121522 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hrszk" Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.163879 4793 scope.go:117] "RemoveContainer" containerID="486adc04b78bb78743145b16157acf26119e480fbf5d44036f9d6f1fdcdfab5f" Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.170999 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hrszk"] Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.182902 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hrszk"] Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.200318 4793 scope.go:117] "RemoveContainer" containerID="86013d362466b71874b54e3b5ba737a4770db0b35004c5abde0821d9305b99e2" Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.242656 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.242751 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.242770 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.243447 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588" Jan 27 21:24:28 crc kubenswrapper[4793]: E0127 21:24:28.243932 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.247813 4793 scope.go:117] "RemoveContainer" containerID="9f04b79657d798ba4ea390634577729d04cdbfc1523622a8c79bd74ce755f6dc" Jan 27 21:24:28 crc kubenswrapper[4793]: E0127 21:24:28.248319 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f04b79657d798ba4ea390634577729d04cdbfc1523622a8c79bd74ce755f6dc\": container with ID starting with 9f04b79657d798ba4ea390634577729d04cdbfc1523622a8c79bd74ce755f6dc not found: ID does not exist" containerID="9f04b79657d798ba4ea390634577729d04cdbfc1523622a8c79bd74ce755f6dc" Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.248358 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f04b79657d798ba4ea390634577729d04cdbfc1523622a8c79bd74ce755f6dc"} err="failed to get container status \"9f04b79657d798ba4ea390634577729d04cdbfc1523622a8c79bd74ce755f6dc\": rpc error: code = NotFound desc = could not find container \"9f04b79657d798ba4ea390634577729d04cdbfc1523622a8c79bd74ce755f6dc\": container with ID starting with 9f04b79657d798ba4ea390634577729d04cdbfc1523622a8c79bd74ce755f6dc not found: ID does not exist" Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.248385 4793 scope.go:117] "RemoveContainer" containerID="486adc04b78bb78743145b16157acf26119e480fbf5d44036f9d6f1fdcdfab5f" Jan 27 21:24:28 crc kubenswrapper[4793]: E0127 21:24:28.248685 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"486adc04b78bb78743145b16157acf26119e480fbf5d44036f9d6f1fdcdfab5f\": container with ID starting with 486adc04b78bb78743145b16157acf26119e480fbf5d44036f9d6f1fdcdfab5f not found: ID does not exist" containerID="486adc04b78bb78743145b16157acf26119e480fbf5d44036f9d6f1fdcdfab5f" Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.248727 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"486adc04b78bb78743145b16157acf26119e480fbf5d44036f9d6f1fdcdfab5f"} err="failed to get container status \"486adc04b78bb78743145b16157acf26119e480fbf5d44036f9d6f1fdcdfab5f\": rpc error: code = NotFound desc = could not find container \"486adc04b78bb78743145b16157acf26119e480fbf5d44036f9d6f1fdcdfab5f\": container with ID starting with 486adc04b78bb78743145b16157acf26119e480fbf5d44036f9d6f1fdcdfab5f not found: ID does not exist" Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.248749 4793 scope.go:117] "RemoveContainer" containerID="86013d362466b71874b54e3b5ba737a4770db0b35004c5abde0821d9305b99e2" Jan 27 21:24:28 crc kubenswrapper[4793]: E0127 21:24:28.249032 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86013d362466b71874b54e3b5ba737a4770db0b35004c5abde0821d9305b99e2\": container with ID starting with 86013d362466b71874b54e3b5ba737a4770db0b35004c5abde0821d9305b99e2 not found: ID does not exist" containerID="86013d362466b71874b54e3b5ba737a4770db0b35004c5abde0821d9305b99e2" Jan 27 21:24:28 crc kubenswrapper[4793]: I0127 21:24:28.249094 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86013d362466b71874b54e3b5ba737a4770db0b35004c5abde0821d9305b99e2"} err="failed to get container status \"86013d362466b71874b54e3b5ba737a4770db0b35004c5abde0821d9305b99e2\": rpc error: code = NotFound desc = could not find container \"86013d362466b71874b54e3b5ba737a4770db0b35004c5abde0821d9305b99e2\": container with ID starting with 86013d362466b71874b54e3b5ba737a4770db0b35004c5abde0821d9305b99e2 not found: ID does not exist" Jan 27 21:24:29 crc kubenswrapper[4793]: I0127 21:24:29.823328 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24b1ff32-7d3f-40d1-aaf8-6bab4010c25b" path="/var/lib/kubelet/pods/24b1ff32-7d3f-40d1-aaf8-6bab4010c25b/volumes" Jan 27 21:24:36 crc kubenswrapper[4793]: I0127 21:24:36.474924 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-24fbh" Jan 27 21:24:36 crc kubenswrapper[4793]: I0127 21:24:36.534151 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-24fbh" Jan 27 21:24:36 crc kubenswrapper[4793]: I0127 21:24:36.711436 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-24fbh"] Jan 27 21:24:38 crc kubenswrapper[4793]: I0127 21:24:38.264971 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-24fbh" podUID="87ad6ca8-f3ba-4b41-a571-e01443fdebf0" containerName="registry-server" containerID="cri-o://46f2a36790b1031edc668b70f635d30f510a463f6762c21ba5c114a001452f86" gracePeriod=2 Jan 27 21:24:38 crc kubenswrapper[4793]: I0127 21:24:38.809224 4793 util.go:48] "No ready sandbox for pod can be found. 
Jan 27 21:24:38 crc kubenswrapper[4793]: I0127 21:24:38.999058 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rng2w\" (UniqueName: \"kubernetes.io/projected/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-kube-api-access-rng2w\") pod \"87ad6ca8-f3ba-4b41-a571-e01443fdebf0\" (UID: \"87ad6ca8-f3ba-4b41-a571-e01443fdebf0\") "
Jan 27 21:24:38 crc kubenswrapper[4793]: I0127 21:24:38.999236 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-catalog-content\") pod \"87ad6ca8-f3ba-4b41-a571-e01443fdebf0\" (UID: \"87ad6ca8-f3ba-4b41-a571-e01443fdebf0\") "
Jan 27 21:24:38 crc kubenswrapper[4793]: I0127 21:24:38.999374 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-utilities\") pod \"87ad6ca8-f3ba-4b41-a571-e01443fdebf0\" (UID: \"87ad6ca8-f3ba-4b41-a571-e01443fdebf0\") "
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.001045 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-utilities" (OuterVolumeSpecName: "utilities") pod "87ad6ca8-f3ba-4b41-a571-e01443fdebf0" (UID: "87ad6ca8-f3ba-4b41-a571-e01443fdebf0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.008944 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-kube-api-access-rng2w" (OuterVolumeSpecName: "kube-api-access-rng2w") pod "87ad6ca8-f3ba-4b41-a571-e01443fdebf0" (UID: "87ad6ca8-f3ba-4b41-a571-e01443fdebf0"). InnerVolumeSpecName "kube-api-access-rng2w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.057376 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "87ad6ca8-f3ba-4b41-a571-e01443fdebf0" (UID: "87ad6ca8-f3ba-4b41-a571-e01443fdebf0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.102317 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rng2w\" (UniqueName: \"kubernetes.io/projected/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-kube-api-access-rng2w\") on node \"crc\" DevicePath \"\""
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.102353 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.102362 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87ad6ca8-f3ba-4b41-a571-e01443fdebf0-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.279243 4793 generic.go:334] "Generic (PLEG): container finished" podID="87ad6ca8-f3ba-4b41-a571-e01443fdebf0" containerID="46f2a36790b1031edc668b70f635d30f510a463f6762c21ba5c114a001452f86" exitCode=0
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.279329 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-24fbh"
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.279328 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24fbh" event={"ID":"87ad6ca8-f3ba-4b41-a571-e01443fdebf0","Type":"ContainerDied","Data":"46f2a36790b1031edc668b70f635d30f510a463f6762c21ba5c114a001452f86"}
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.279391 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24fbh" event={"ID":"87ad6ca8-f3ba-4b41-a571-e01443fdebf0","Type":"ContainerDied","Data":"7021d6e01760b6353b395fa06ed5e276b51945892dabcf614f183b45be20198a"}
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.279438 4793 scope.go:117] "RemoveContainer" containerID="46f2a36790b1031edc668b70f635d30f510a463f6762c21ba5c114a001452f86"
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.320209 4793 scope.go:117] "RemoveContainer" containerID="75e33cb61c337d5c98065f945e1b3172d8f157e823a755449ce3c33a983104c7"
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.351859 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-24fbh"]
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.364483 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-24fbh"]
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.411632 4793 scope.go:117] "RemoveContainer" containerID="5b8dae830185114917053d61df24fef5e0e52df136ffdce57fad23c8c17489af"
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.444186 4793 scope.go:117] "RemoveContainer" containerID="46f2a36790b1031edc668b70f635d30f510a463f6762c21ba5c114a001452f86"
Jan 27 21:24:39 crc kubenswrapper[4793]: E0127 21:24:39.445120 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46f2a36790b1031edc668b70f635d30f510a463f6762c21ba5c114a001452f86\": container with ID starting with 46f2a36790b1031edc668b70f635d30f510a463f6762c21ba5c114a001452f86 not found: ID does not exist" containerID="46f2a36790b1031edc668b70f635d30f510a463f6762c21ba5c114a001452f86"
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.445154 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46f2a36790b1031edc668b70f635d30f510a463f6762c21ba5c114a001452f86"} err="failed to get container status \"46f2a36790b1031edc668b70f635d30f510a463f6762c21ba5c114a001452f86\": rpc error: code = NotFound desc = could not find container \"46f2a36790b1031edc668b70f635d30f510a463f6762c21ba5c114a001452f86\": container with ID starting with 46f2a36790b1031edc668b70f635d30f510a463f6762c21ba5c114a001452f86 not found: ID does not exist"
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.445174 4793 scope.go:117] "RemoveContainer" containerID="75e33cb61c337d5c98065f945e1b3172d8f157e823a755449ce3c33a983104c7"
Jan 27 21:24:39 crc kubenswrapper[4793]: E0127 21:24:39.445504 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75e33cb61c337d5c98065f945e1b3172d8f157e823a755449ce3c33a983104c7\": container with ID starting with 75e33cb61c337d5c98065f945e1b3172d8f157e823a755449ce3c33a983104c7 not found: ID does not exist" containerID="75e33cb61c337d5c98065f945e1b3172d8f157e823a755449ce3c33a983104c7"
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.445572 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75e33cb61c337d5c98065f945e1b3172d8f157e823a755449ce3c33a983104c7"} err="failed to get container status \"75e33cb61c337d5c98065f945e1b3172d8f157e823a755449ce3c33a983104c7\": rpc error: code = NotFound desc = could not find container \"75e33cb61c337d5c98065f945e1b3172d8f157e823a755449ce3c33a983104c7\": container with ID starting with 75e33cb61c337d5c98065f945e1b3172d8f157e823a755449ce3c33a983104c7 not found: ID does not exist"
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.445611 4793 scope.go:117] "RemoveContainer" containerID="5b8dae830185114917053d61df24fef5e0e52df136ffdce57fad23c8c17489af"
Jan 27 21:24:39 crc kubenswrapper[4793]: E0127 21:24:39.445999 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b8dae830185114917053d61df24fef5e0e52df136ffdce57fad23c8c17489af\": container with ID starting with 5b8dae830185114917053d61df24fef5e0e52df136ffdce57fad23c8c17489af not found: ID does not exist" containerID="5b8dae830185114917053d61df24fef5e0e52df136ffdce57fad23c8c17489af"
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.446034 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b8dae830185114917053d61df24fef5e0e52df136ffdce57fad23c8c17489af"} err="failed to get container status \"5b8dae830185114917053d61df24fef5e0e52df136ffdce57fad23c8c17489af\": rpc error: code = NotFound desc = could not find container \"5b8dae830185114917053d61df24fef5e0e52df136ffdce57fad23c8c17489af\": container with ID starting with 5b8dae830185114917053d61df24fef5e0e52df136ffdce57fad23c8c17489af not found: ID does not exist"
Jan 27 21:24:39 crc kubenswrapper[4793]: E0127 21:24:39.613069 4793 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod87ad6ca8_f3ba_4b41_a571_e01443fdebf0.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod87ad6ca8_f3ba_4b41_a571_e01443fdebf0.slice/crio-7021d6e01760b6353b395fa06ed5e276b51945892dabcf614f183b45be20198a\": RecentStats: unable to find data in memory cache]"
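The trailing cadvisor entry is the stats side of the same teardown: the pod's cgroup was just removed, so its RecentStats are missing from the memory cache, and the provider reports a partial failure instead of aborting the whole query. A sketch of that collect-what-you-can pattern; the query callback is illustrative, not cadvisor's API:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // collectStats polls a stats source per cgroup and keeps going on
    // partial failures, returning whatever it gathered plus a joined error.
    func collectStats(cgroups []string, query func(string) (int, error)) (map[string]int, error) {
    	out := make(map[string]int, len(cgroups))
    	var errs []error
    	for _, cg := range cgroups {
    		v, err := query(cg)
    		if err != nil {
    			errs = append(errs, fmt.Errorf("%q: %w", cg, err))
    			continue // partial failure: skip this cgroup, don't abort
    		}
    		out[cg] = v
    	}
    	return out, errors.Join(errs...)
    }

    func main() {
    	stats, err := collectStats([]string{"live.slice", "deleted.slice"},
    		func(cg string) (int, error) {
    			if cg == "deleted.slice" {
    				return 0, errors.New("RecentStats: unable to find data in memory cache")
    			}
    			return 42, nil
    		})
    	fmt.Println(stats, err) // map[live.slice:42] plus the partial error
    }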
Jan 27 21:24:39 crc kubenswrapper[4793]: I0127 21:24:39.819484 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87ad6ca8-f3ba-4b41-a571-e01443fdebf0" path="/var/lib/kubelet/pods/87ad6ca8-f3ba-4b41-a571-e01443fdebf0/volumes"
Jan 27 21:24:40 crc kubenswrapper[4793]: I0127 21:24:40.803645 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:24:40 crc kubenswrapper[4793]: E0127 21:24:40.803917 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:24:53 crc kubenswrapper[4793]: I0127 21:24:53.804018 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:24:53 crc kubenswrapper[4793]: E0127 21:24:53.805037 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:25:05 crc kubenswrapper[4793]: I0127 21:25:05.813908 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:25:05 crc kubenswrapper[4793]: E0127 21:25:05.814809 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:25:17 crc kubenswrapper[4793]: I0127 21:25:17.814832 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:25:17 crc kubenswrapper[4793]: E0127 21:25:17.817098 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:25:32 crc kubenswrapper[4793]: I0127 21:25:32.803530 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:25:32 crc kubenswrapper[4793]: E0127 21:25:32.805793 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:25:46 crc kubenswrapper[4793]: I0127 21:25:46.803387 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:25:46 crc kubenswrapper[4793]: E0127 21:25:46.804293 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:25:58 crc kubenswrapper[4793]: I0127 21:25:58.803645 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:25:58 crc kubenswrapper[4793]: E0127 21:25:58.804402 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:26:10 crc kubenswrapper[4793]: I0127 21:26:10.804335 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:26:10 crc kubenswrapper[4793]: E0127 21:26:10.805299 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:26:21 crc kubenswrapper[4793]: I0127 21:26:21.804377 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:26:21 crc kubenswrapper[4793]: E0127 21:26:21.805465 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:26:22 crc kubenswrapper[4793]: I0127 21:26:22.915595 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 21:26:22 crc kubenswrapper[4793]: I0127 21:26:22.915655 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 21:26:35 crc kubenswrapper[4793]: I0127 21:26:35.819963 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:26:35 crc kubenswrapper[4793]: E0127 21:26:35.821176 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:26:50 crc kubenswrapper[4793]: I0127 21:26:50.803430 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588" Jan 27 21:26:50 crc kubenswrapper[4793]: E0127 21:26:50.804344 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:26:52 crc kubenswrapper[4793]: I0127 21:26:52.753914 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:26:52 crc kubenswrapper[4793]: I0127 21:26:52.754254 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:27:03 crc kubenswrapper[4793]: I0127 21:27:03.803761 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588" Jan 27 21:27:03 crc kubenswrapper[4793]: E0127 21:27:03.804811 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:27:17 crc kubenswrapper[4793]: I0127 21:27:17.803669 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588" Jan 27 21:27:17 crc kubenswrapper[4793]: E0127 21:27:17.804456 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:27:22 crc kubenswrapper[4793]: I0127 21:27:22.808773 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:27:22 crc kubenswrapper[4793]: I0127 21:27:22.809508 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:27:22 crc kubenswrapper[4793]: I0127 21:27:22.809601 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 21:27:22 crc kubenswrapper[4793]: I0127 21:27:22.810650 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"805a666f98506e4c93d143d9f1a985ffcc479cdb8fb96f76713774c093745637"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 21:27:22 crc kubenswrapper[4793]: I0127 21:27:22.810759 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://805a666f98506e4c93d143d9f1a985ffcc479cdb8fb96f76713774c093745637" gracePeriod=600 Jan 27 21:27:23 crc kubenswrapper[4793]: I0127 21:27:23.432492 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="805a666f98506e4c93d143d9f1a985ffcc479cdb8fb96f76713774c093745637" exitCode=0 Jan 27 21:27:23 crc kubenswrapper[4793]: I0127 21:27:23.432573 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"805a666f98506e4c93d143d9f1a985ffcc479cdb8fb96f76713774c093745637"} Jan 27 21:27:23 crc kubenswrapper[4793]: I0127 21:27:23.433294 4793 scope.go:117] "RemoveContainer" containerID="3f59d9a252acf3fe5cb182585e1eafa0cdf64d9531ca0ea00f742596b1e976bb" Jan 27 21:27:23 crc kubenswrapper[4793]: I0127 21:27:23.433043 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569"} Jan 27 21:27:32 crc kubenswrapper[4793]: I0127 21:27:32.830900 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588" Jan 27 21:27:32 crc kubenswrapper[4793]: E0127 21:27:32.831987 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:27:47 crc kubenswrapper[4793]: I0127 21:27:47.803080 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588" Jan 27 21:27:47 crc kubenswrapper[4793]: E0127 21:27:47.804007 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:27:59 crc kubenswrapper[4793]: I0127 21:27:59.788878 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-m486j"] Jan 27 21:27:59 crc kubenswrapper[4793]: E0127 21:27:59.789886 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24b1ff32-7d3f-40d1-aaf8-6bab4010c25b" containerName="extract-utilities" Jan 27 21:27:59 crc kubenswrapper[4793]: I0127 21:27:59.789903 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="24b1ff32-7d3f-40d1-aaf8-6bab4010c25b" containerName="extract-utilities" Jan 27 21:27:59 crc 
kubenswrapper[4793]: E0127 21:27:59.789931 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24b1ff32-7d3f-40d1-aaf8-6bab4010c25b" containerName="registry-server" Jan 27 21:27:59 crc kubenswrapper[4793]: I0127 21:27:59.789940 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="24b1ff32-7d3f-40d1-aaf8-6bab4010c25b" containerName="registry-server" Jan 27 21:27:59 crc kubenswrapper[4793]: E0127 21:27:59.789955 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24b1ff32-7d3f-40d1-aaf8-6bab4010c25b" containerName="extract-content" Jan 27 21:27:59 crc kubenswrapper[4793]: I0127 21:27:59.789961 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="24b1ff32-7d3f-40d1-aaf8-6bab4010c25b" containerName="extract-content" Jan 27 21:27:59 crc kubenswrapper[4793]: E0127 21:27:59.789973 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87ad6ca8-f3ba-4b41-a571-e01443fdebf0" containerName="extract-utilities" Jan 27 21:27:59 crc kubenswrapper[4793]: I0127 21:27:59.789979 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="87ad6ca8-f3ba-4b41-a571-e01443fdebf0" containerName="extract-utilities" Jan 27 21:27:59 crc kubenswrapper[4793]: E0127 21:27:59.789995 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87ad6ca8-f3ba-4b41-a571-e01443fdebf0" containerName="extract-content" Jan 27 21:27:59 crc kubenswrapper[4793]: I0127 21:27:59.790001 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="87ad6ca8-f3ba-4b41-a571-e01443fdebf0" containerName="extract-content" Jan 27 21:27:59 crc kubenswrapper[4793]: E0127 21:27:59.790009 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87ad6ca8-f3ba-4b41-a571-e01443fdebf0" containerName="registry-server" Jan 27 21:27:59 crc kubenswrapper[4793]: I0127 21:27:59.790014 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="87ad6ca8-f3ba-4b41-a571-e01443fdebf0" containerName="registry-server" Jan 27 21:27:59 crc kubenswrapper[4793]: I0127 21:27:59.790205 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="87ad6ca8-f3ba-4b41-a571-e01443fdebf0" containerName="registry-server" Jan 27 21:27:59 crc kubenswrapper[4793]: I0127 21:27:59.790226 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="24b1ff32-7d3f-40d1-aaf8-6bab4010c25b" containerName="registry-server" Jan 27 21:27:59 crc kubenswrapper[4793]: I0127 21:27:59.791671 4793 util.go:30] "No sandbox for pod can be found. 
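Admission of certified-operators-m486j first evicts leftover resource-manager state for the two catalog pods deleted earlier (24b1ff32-... and 87ad6ca8-...), which is what the paired cpu_manager/state_mem entries record. A toy version of that stale-state sweep over an in-memory assignment map; the real cpu and memory managers checkpoint their state to disk:

    package main

    import "fmt"

    // removeStaleState drops assignments for containers whose pods are no
    // longer active, emulating the "Deleted CPUSet assignment" entries.
    func removeStaleState(assignments map[string]map[string]string, activePods map[string]bool) {
    	for podUID, containers := range assignments {
    		if activePods[podUID] {
    			continue
    		}
    		for name := range containers {
    			fmt.Printf("Deleted CPUSet assignment podUID=%q containerName=%q\n", podUID, name)
    		}
    		delete(assignments, podUID) // deleting during range is safe in Go
    	}
    }

    func main() {
    	state := map[string]map[string]string{
    		"24b1ff32": {"extract-utilities": "0-3", "extract-content": "0-3", "registry-server": "0-3"},
    		"live-pod": {"app": "0-3"},
    	}
    	removeStaleState(state, map[string]bool{"live-pod": true})
    	fmt.Println(state) // only live-pod's assignment remains
    }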
Jan 27 21:27:59 crc kubenswrapper[4793]: I0127 21:27:59.810932 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:27:59 crc kubenswrapper[4793]: E0127 21:27:59.811940 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:27:59 crc kubenswrapper[4793]: I0127 21:27:59.865825 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-m486j"]
Jan 27 21:27:59 crc kubenswrapper[4793]: I0127 21:27:59.919123 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-catalog-content\") pod \"certified-operators-m486j\" (UID: \"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940\") " pod="openshift-marketplace/certified-operators-m486j"
Jan 27 21:27:59 crc kubenswrapper[4793]: I0127 21:27:59.919222 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b79tv\" (UniqueName: \"kubernetes.io/projected/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-kube-api-access-b79tv\") pod \"certified-operators-m486j\" (UID: \"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940\") " pod="openshift-marketplace/certified-operators-m486j"
Jan 27 21:27:59 crc kubenswrapper[4793]: I0127 21:27:59.919537 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-utilities\") pod \"certified-operators-m486j\" (UID: \"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940\") " pod="openshift-marketplace/certified-operators-m486j"
Jan 27 21:28:00 crc kubenswrapper[4793]: I0127 21:28:00.023137 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-catalog-content\") pod \"certified-operators-m486j\" (UID: \"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940\") " pod="openshift-marketplace/certified-operators-m486j"
Jan 27 21:28:00 crc kubenswrapper[4793]: I0127 21:28:00.023192 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b79tv\" (UniqueName: \"kubernetes.io/projected/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-kube-api-access-b79tv\") pod \"certified-operators-m486j\" (UID: \"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940\") " pod="openshift-marketplace/certified-operators-m486j"
Jan 27 21:28:00 crc kubenswrapper[4793]: I0127 21:28:00.023307 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-utilities\") pod \"certified-operators-m486j\" (UID: \"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940\") " pod="openshift-marketplace/certified-operators-m486j"
Jan 27 21:28:00 crc kubenswrapper[4793]: I0127 21:28:00.023881 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-utilities\") pod \"certified-operators-m486j\" (UID: \"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940\") " pod="openshift-marketplace/certified-operators-m486j"
Jan 27 21:28:00 crc kubenswrapper[4793]: I0127 21:28:00.024063 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-catalog-content\") pod \"certified-operators-m486j\" (UID: \"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940\") " pod="openshift-marketplace/certified-operators-m486j"
Jan 27 21:28:00 crc kubenswrapper[4793]: I0127 21:28:00.527889 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b79tv\" (UniqueName: \"kubernetes.io/projected/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-kube-api-access-b79tv\") pod \"certified-operators-m486j\" (UID: \"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940\") " pod="openshift-marketplace/certified-operators-m486j"
Jan 27 21:28:00 crc kubenswrapper[4793]: I0127 21:28:00.734694 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-m486j"
Jan 27 21:28:01 crc kubenswrapper[4793]: I0127 21:28:01.246613 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-m486j"]
Jan 27 21:28:01 crc kubenswrapper[4793]: W0127 21:28:01.249444 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8d9e7eb5_a2bb_4398_8e14_dedf8e22d940.slice/crio-0c2033efc10f9022103fe89bd44c2c8308ea1449842212545e82247a546db1f1 WatchSource:0}: Error finding container 0c2033efc10f9022103fe89bd44c2c8308ea1449842212545e82247a546db1f1: Status 404 returned error can't find the container with id 0c2033efc10f9022103fe89bd44c2c8308ea1449842212545e82247a546db1f1
Jan 27 21:28:01 crc kubenswrapper[4793]: I0127 21:28:01.862728 4793 generic.go:334] "Generic (PLEG): container finished" podID="8d9e7eb5-a2bb-4398-8e14-dedf8e22d940" containerID="505651ea48f0b817f678ae6c6e6e34ec9d25eec9d5b0fe46adedf7035028ac21" exitCode=0
Jan 27 21:28:01 crc kubenswrapper[4793]: I0127 21:28:01.862846 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m486j" event={"ID":"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940","Type":"ContainerDied","Data":"505651ea48f0b817f678ae6c6e6e34ec9d25eec9d5b0fe46adedf7035028ac21"}
Jan 27 21:28:01 crc kubenswrapper[4793]: I0127 21:28:01.863030 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m486j" event={"ID":"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940","Type":"ContainerStarted","Data":"0c2033efc10f9022103fe89bd44c2c8308ea1449842212545e82247a546db1f1"}
Jan 27 21:28:02 crc kubenswrapper[4793]: I0127 21:28:02.876794 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m486j" event={"ID":"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940","Type":"ContainerStarted","Data":"46e13fbbc6e771ee3eef3057a864d8e127fbd4c7812e5b13f4c203c97e339c08"}
Jan 27 21:28:03 crc kubenswrapper[4793]: I0127 21:28:03.893220 4793 generic.go:334] "Generic (PLEG): container finished" podID="8d9e7eb5-a2bb-4398-8e14-dedf8e22d940" containerID="46e13fbbc6e771ee3eef3057a864d8e127fbd4c7812e5b13f4c203c97e339c08" exitCode=0
Jan 27 21:28:03 crc kubenswrapper[4793]: I0127 21:28:03.893336 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m486j" event={"ID":"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940","Type":"ContainerDied","Data":"46e13fbbc6e771ee3eef3057a864d8e127fbd4c7812e5b13f4c203c97e339c08"}
event={"ID":"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940","Type":"ContainerDied","Data":"46e13fbbc6e771ee3eef3057a864d8e127fbd4c7812e5b13f4c203c97e339c08"} Jan 27 21:28:04 crc kubenswrapper[4793]: I0127 21:28:04.913434 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m486j" event={"ID":"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940","Type":"ContainerStarted","Data":"31bc8367a3f9af9eb32fc4ad29f767efc26d877bd0fb0e024cc0b1fff0c1c6a6"} Jan 27 21:28:04 crc kubenswrapper[4793]: I0127 21:28:04.940916 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-m486j" podStartSLOduration=3.469419228 podStartE2EDuration="5.940868405s" podCreationTimestamp="2026-01-27 21:27:59 +0000 UTC" firstStartedPulling="2026-01-27 21:28:01.865899488 +0000 UTC m=+5107.256152644" lastFinishedPulling="2026-01-27 21:28:04.337348665 +0000 UTC m=+5109.727601821" observedRunningTime="2026-01-27 21:28:04.93415221 +0000 UTC m=+5110.324405366" watchObservedRunningTime="2026-01-27 21:28:04.940868405 +0000 UTC m=+5110.331121561" Jan 27 21:28:10 crc kubenswrapper[4793]: I0127 21:28:10.735370 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-m486j" Jan 27 21:28:10 crc kubenswrapper[4793]: I0127 21:28:10.735894 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-m486j" Jan 27 21:28:10 crc kubenswrapper[4793]: I0127 21:28:10.797301 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-m486j" Jan 27 21:28:11 crc kubenswrapper[4793]: I0127 21:28:11.162835 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-m486j" Jan 27 21:28:11 crc kubenswrapper[4793]: I0127 21:28:11.234433 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-m486j"] Jan 27 21:28:11 crc kubenswrapper[4793]: I0127 21:28:11.804112 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588" Jan 27 21:28:11 crc kubenswrapper[4793]: E0127 21:28:11.805154 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:28:12 crc kubenswrapper[4793]: I0127 21:28:12.997335 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-m486j" podUID="8d9e7eb5-a2bb-4398-8e14-dedf8e22d940" containerName="registry-server" containerID="cri-o://31bc8367a3f9af9eb32fc4ad29f767efc26d877bd0fb0e024cc0b1fff0c1c6a6" gracePeriod=2 Jan 27 21:28:13 crc kubenswrapper[4793]: I0127 21:28:13.572471 4793 util.go:48] "No ready sandbox for pod can be found. 
Jan 27 21:28:13 crc kubenswrapper[4793]: I0127 21:28:13.669412 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-utilities\") pod \"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940\" (UID: \"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940\") "
Jan 27 21:28:13 crc kubenswrapper[4793]: I0127 21:28:13.669631 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b79tv\" (UniqueName: \"kubernetes.io/projected/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-kube-api-access-b79tv\") pod \"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940\" (UID: \"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940\") "
Jan 27 21:28:13 crc kubenswrapper[4793]: I0127 21:28:13.669690 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-catalog-content\") pod \"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940\" (UID: \"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940\") "
Jan 27 21:28:13 crc kubenswrapper[4793]: I0127 21:28:13.670574 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-utilities" (OuterVolumeSpecName: "utilities") pod "8d9e7eb5-a2bb-4398-8e14-dedf8e22d940" (UID: "8d9e7eb5-a2bb-4398-8e14-dedf8e22d940"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 21:28:13 crc kubenswrapper[4793]: I0127 21:28:13.676356 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-kube-api-access-b79tv" (OuterVolumeSpecName: "kube-api-access-b79tv") pod "8d9e7eb5-a2bb-4398-8e14-dedf8e22d940" (UID: "8d9e7eb5-a2bb-4398-8e14-dedf8e22d940"). InnerVolumeSpecName "kube-api-access-b79tv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 21:28:13 crc kubenswrapper[4793]: I0127 21:28:13.771392 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b79tv\" (UniqueName: \"kubernetes.io/projected/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-kube-api-access-b79tv\") on node \"crc\" DevicePath \"\""
Jan 27 21:28:13 crc kubenswrapper[4793]: I0127 21:28:13.771425 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.009093 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8d9e7eb5-a2bb-4398-8e14-dedf8e22d940" (UID: "8d9e7eb5-a2bb-4398-8e14-dedf8e22d940"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.012933 4793 generic.go:334] "Generic (PLEG): container finished" podID="8d9e7eb5-a2bb-4398-8e14-dedf8e22d940" containerID="31bc8367a3f9af9eb32fc4ad29f767efc26d877bd0fb0e024cc0b1fff0c1c6a6" exitCode=0
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.013030 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m486j" event={"ID":"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940","Type":"ContainerDied","Data":"31bc8367a3f9af9eb32fc4ad29f767efc26d877bd0fb0e024cc0b1fff0c1c6a6"}
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.013156 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m486j" event={"ID":"8d9e7eb5-a2bb-4398-8e14-dedf8e22d940","Type":"ContainerDied","Data":"0c2033efc10f9022103fe89bd44c2c8308ea1449842212545e82247a546db1f1"}
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.013193 4793 scope.go:117] "RemoveContainer" containerID="31bc8367a3f9af9eb32fc4ad29f767efc26d877bd0fb0e024cc0b1fff0c1c6a6"
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.013423 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-m486j"
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.065071 4793 scope.go:117] "RemoveContainer" containerID="46e13fbbc6e771ee3eef3057a864d8e127fbd4c7812e5b13f4c203c97e339c08"
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.072767 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-m486j"]
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.078402 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.087945 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-m486j"]
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.110504 4793 scope.go:117] "RemoveContainer" containerID="505651ea48f0b817f678ae6c6e6e34ec9d25eec9d5b0fe46adedf7035028ac21"
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.159755 4793 scope.go:117] "RemoveContainer" containerID="31bc8367a3f9af9eb32fc4ad29f767efc26d877bd0fb0e024cc0b1fff0c1c6a6"
Jan 27 21:28:14 crc kubenswrapper[4793]: E0127 21:28:14.160492 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31bc8367a3f9af9eb32fc4ad29f767efc26d877bd0fb0e024cc0b1fff0c1c6a6\": container with ID starting with 31bc8367a3f9af9eb32fc4ad29f767efc26d877bd0fb0e024cc0b1fff0c1c6a6 not found: ID does not exist" containerID="31bc8367a3f9af9eb32fc4ad29f767efc26d877bd0fb0e024cc0b1fff0c1c6a6"
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.160538 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31bc8367a3f9af9eb32fc4ad29f767efc26d877bd0fb0e024cc0b1fff0c1c6a6"} err="failed to get container status \"31bc8367a3f9af9eb32fc4ad29f767efc26d877bd0fb0e024cc0b1fff0c1c6a6\": rpc error: code = NotFound desc = could not find container \"31bc8367a3f9af9eb32fc4ad29f767efc26d877bd0fb0e024cc0b1fff0c1c6a6\": container with ID starting with 31bc8367a3f9af9eb32fc4ad29f767efc26d877bd0fb0e024cc0b1fff0c1c6a6 not found: ID does not exist"
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.160591 4793 scope.go:117] "RemoveContainer" containerID="46e13fbbc6e771ee3eef3057a864d8e127fbd4c7812e5b13f4c203c97e339c08"
Jan 27 21:28:14 crc kubenswrapper[4793]: E0127 21:28:14.161181 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46e13fbbc6e771ee3eef3057a864d8e127fbd4c7812e5b13f4c203c97e339c08\": container with ID starting with 46e13fbbc6e771ee3eef3057a864d8e127fbd4c7812e5b13f4c203c97e339c08 not found: ID does not exist" containerID="46e13fbbc6e771ee3eef3057a864d8e127fbd4c7812e5b13f4c203c97e339c08"
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.161218 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46e13fbbc6e771ee3eef3057a864d8e127fbd4c7812e5b13f4c203c97e339c08"} err="failed to get container status \"46e13fbbc6e771ee3eef3057a864d8e127fbd4c7812e5b13f4c203c97e339c08\": rpc error: code = NotFound desc = could not find container \"46e13fbbc6e771ee3eef3057a864d8e127fbd4c7812e5b13f4c203c97e339c08\": container with ID starting with 46e13fbbc6e771ee3eef3057a864d8e127fbd4c7812e5b13f4c203c97e339c08 not found: ID does not exist"
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.161246 4793 scope.go:117] "RemoveContainer" containerID="505651ea48f0b817f678ae6c6e6e34ec9d25eec9d5b0fe46adedf7035028ac21"
Jan 27 21:28:14 crc kubenswrapper[4793]: E0127 21:28:14.161538 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"505651ea48f0b817f678ae6c6e6e34ec9d25eec9d5b0fe46adedf7035028ac21\": container with ID starting with 505651ea48f0b817f678ae6c6e6e34ec9d25eec9d5b0fe46adedf7035028ac21 not found: ID does not exist" containerID="505651ea48f0b817f678ae6c6e6e34ec9d25eec9d5b0fe46adedf7035028ac21"
Jan 27 21:28:14 crc kubenswrapper[4793]: I0127 21:28:14.161575 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"505651ea48f0b817f678ae6c6e6e34ec9d25eec9d5b0fe46adedf7035028ac21"} err="failed to get container status \"505651ea48f0b817f678ae6c6e6e34ec9d25eec9d5b0fe46adedf7035028ac21\": rpc error: code = NotFound desc = could not find container \"505651ea48f0b817f678ae6c6e6e34ec9d25eec9d5b0fe46adedf7035028ac21\": container with ID starting with 505651ea48f0b817f678ae6c6e6e34ec9d25eec9d5b0fe46adedf7035028ac21 not found: ID does not exist"
Jan 27 21:28:15 crc kubenswrapper[4793]: I0127 21:28:15.819111 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d9e7eb5-a2bb-4398-8e14-dedf8e22d940" path="/var/lib/kubelet/pods/8d9e7eb5-a2bb-4398-8e14-dedf8e22d940/volumes"
Jan 27 21:28:22 crc kubenswrapper[4793]: I0127 21:28:22.803431 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:28:22 crc kubenswrapper[4793]: E0127 21:28:22.804453 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:28:33 crc kubenswrapper[4793]: I0127 21:28:33.803935 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:28:33 crc kubenswrapper[4793]: E0127 21:28:33.804756 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
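The teardown above repeats the volume manager's fixed per-volume progression: "UnmountVolume started", then "UnmountVolume.TearDown succeeded", then "Volume detached". A sketch of that three-step state machine; the states are simplified, not the kubelet's actual volume manager types:

    package main

    import "fmt"

    type volState int

    const (
    	mounted volState = iota
    	unmounting
    	tornDown
    	detached
    )

    // reconcile advances one volume by one step, printing the same
    // milestones the log records for each volume.
    func reconcile(name string, s volState) volState {
    	switch s {
    	case mounted:
    		fmt.Printf("operationExecutor.UnmountVolume started for volume %q\n", name)
    		return unmounting
    	case unmounting:
    		fmt.Printf("UnmountVolume.TearDown succeeded for volume %q\n", name)
    		return tornDown
    	case tornDown:
    		fmt.Printf("Volume detached for volume %q\n", name)
    		return detached
    	}
    	return s
    }

    func main() {
    	for _, v := range []string{"utilities", "catalog-content", "kube-api-access-b79tv"} {
    		for s := mounted; s != detached; {
    			s = reconcile(v, s)
    		}
    	}
    }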
Jan 27 21:28:46 crc kubenswrapper[4793]: I0127 21:28:46.803752 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:28:46 crc kubenswrapper[4793]: E0127 21:28:46.804515 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:28:59 crc kubenswrapper[4793]: I0127 21:28:59.803045 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:28:59 crc kubenswrapper[4793]: E0127 21:28:59.803715 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:29:12 crc kubenswrapper[4793]: I0127 21:29:12.804762 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:29:12 crc kubenswrapper[4793]: E0127 21:29:12.805854 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:29:23 crc kubenswrapper[4793]: I0127 21:29:23.803247 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:29:24 crc kubenswrapper[4793]: I0127 21:29:24.267090 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009"}
Jan 27 21:29:27 crc kubenswrapper[4793]: I0127 21:29:27.299378 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" exitCode=1
Jan 27 21:29:27 crc kubenswrapper[4793]: I0127 21:29:27.299450 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009"}
Jan 27 21:29:27 crc kubenswrapper[4793]: I0127 21:29:27.299878 4793 scope.go:117] "RemoveContainer" containerID="de53fcc3532ca49b6f8652332adb9e234ecc0346b3ec7b65eb7d2cef0c53a588"
Jan 27 21:29:27 crc kubenswrapper[4793]: I0127 21:29:27.300706 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009"
Jan 27 21:29:27 crc kubenswrapper[4793]: E0127 21:29:27.300984 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:29:28 crc kubenswrapper[4793]: I0127 21:29:28.243410 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 21:29:28 crc kubenswrapper[4793]: I0127 21:29:28.243760 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 21:29:28 crc kubenswrapper[4793]: I0127 21:29:28.243823 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 27 21:29:28 crc kubenswrapper[4793]: I0127 21:29:28.243882 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 21:29:28 crc kubenswrapper[4793]: I0127 21:29:28.310441 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009"
Jan 27 21:29:28 crc kubenswrapper[4793]: E0127 21:29:28.311778 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:29:29 crc kubenswrapper[4793]: I0127 21:29:29.322904 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009"
Jan 27 21:29:29 crc kubenswrapper[4793]: E0127 21:29:29.323616 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:29:42 crc kubenswrapper[4793]: I0127 21:29:42.859403 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009"
Jan 27 21:29:42 crc kubenswrapper[4793]: E0127 21:29:42.860065 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:29:52 crc kubenswrapper[4793]: I0127 21:29:52.754059 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 21:29:52 crc kubenswrapper[4793]: I0127 21:29:52.754717 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
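At 21:29:23 the 5m backoff finally expires: the old container is removed, 216dbb... starts, runs about three seconds, exits 1, and the backoff target switches to the new container ID as the loop resumes. The "restarting failed container" message implies restart policy Always; a sketch of the restart-policy decision, with the policy strings mirroring the Kubernetes API values and the logic deliberately simplified:

    package main

    import "fmt"

    // shouldRestart decides whether a terminated container gets another
    // attempt. With Always, the kubelet retries regardless of exit code
    // (behind the crash-loop backoff); OnFailure retries only on nonzero
    // exit; Never leaves the container terminated.
    func shouldRestart(policy string, exitCode int) bool {
    	switch policy {
    	case "Always":
    		return true
    	case "OnFailure":
    		return exitCode != 0
    	default: // "Never"
    		return false
    	}
    }

    func main() {
    	fmt.Println(shouldRestart("Always", 1))    // true: back off, then retry, as here
    	fmt.Println(shouldRestart("OnFailure", 0)) // false: stays terminated
    }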
Jan 27 21:29:54 crc kubenswrapper[4793]: I0127 21:29:54.805030 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009"
Jan 27 21:29:54 crc kubenswrapper[4793]: E0127 21:29:54.807628 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.158415 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"]
Jan 27 21:30:00 crc kubenswrapper[4793]: E0127 21:30:00.159756 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d9e7eb5-a2bb-4398-8e14-dedf8e22d940" containerName="extract-utilities"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.159777 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d9e7eb5-a2bb-4398-8e14-dedf8e22d940" containerName="extract-utilities"
Jan 27 21:30:00 crc kubenswrapper[4793]: E0127 21:30:00.159797 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d9e7eb5-a2bb-4398-8e14-dedf8e22d940" containerName="registry-server"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.159806 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d9e7eb5-a2bb-4398-8e14-dedf8e22d940" containerName="registry-server"
Jan 27 21:30:00 crc kubenswrapper[4793]: E0127 21:30:00.159822 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d9e7eb5-a2bb-4398-8e14-dedf8e22d940" containerName="extract-content"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.159830 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d9e7eb5-a2bb-4398-8e14-dedf8e22d940" containerName="extract-content"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.160080 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d9e7eb5-a2bb-4398-8e14-dedf8e22d940" containerName="registry-server"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.161045 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.165316 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.165675 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.170654 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"]
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.308065 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nslj\" (UniqueName: \"kubernetes.io/projected/e8347701-8062-418c-9ff1-9b7a05a3509e-kube-api-access-9nslj\") pod \"collect-profiles-29492490-r2gnv\" (UID: \"e8347701-8062-418c-9ff1-9b7a05a3509e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.308137 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e8347701-8062-418c-9ff1-9b7a05a3509e-config-volume\") pod \"collect-profiles-29492490-r2gnv\" (UID: \"e8347701-8062-418c-9ff1-9b7a05a3509e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.308230 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e8347701-8062-418c-9ff1-9b7a05a3509e-secret-volume\") pod \"collect-profiles-29492490-r2gnv\" (UID: \"e8347701-8062-418c-9ff1-9b7a05a3509e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.410068 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e8347701-8062-418c-9ff1-9b7a05a3509e-secret-volume\") pod \"collect-profiles-29492490-r2gnv\" (UID: \"e8347701-8062-418c-9ff1-9b7a05a3509e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.410222 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nslj\" (UniqueName: \"kubernetes.io/projected/e8347701-8062-418c-9ff1-9b7a05a3509e-kube-api-access-9nslj\") pod \"collect-profiles-29492490-r2gnv\" (UID: \"e8347701-8062-418c-9ff1-9b7a05a3509e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.410268 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e8347701-8062-418c-9ff1-9b7a05a3509e-config-volume\") pod \"collect-profiles-29492490-r2gnv\" (UID: \"e8347701-8062-418c-9ff1-9b7a05a3509e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.411540 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e8347701-8062-418c-9ff1-9b7a05a3509e-config-volume\") pod \"collect-profiles-29492490-r2gnv\" (UID: \"e8347701-8062-418c-9ff1-9b7a05a3509e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.419198 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e8347701-8062-418c-9ff1-9b7a05a3509e-secret-volume\") pod \"collect-profiles-29492490-r2gnv\" (UID: \"e8347701-8062-418c-9ff1-9b7a05a3509e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.429272 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nslj\" (UniqueName: \"kubernetes.io/projected/e8347701-8062-418c-9ff1-9b7a05a3509e-kube-api-access-9nslj\") pod \"collect-profiles-29492490-r2gnv\" (UID: \"e8347701-8062-418c-9ff1-9b7a05a3509e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.487796 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"
Jan 27 21:30:00 crc kubenswrapper[4793]: I0127 21:30:00.998921 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"]
Jan 27 21:30:01 crc kubenswrapper[4793]: I0127 21:30:01.741617 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv" event={"ID":"e8347701-8062-418c-9ff1-9b7a05a3509e","Type":"ContainerStarted","Data":"1736be7a798a3618bf76683fb4f57755fc764015e2846571c7e9f6aba88410e1"}
Jan 27 21:30:01 crc kubenswrapper[4793]: I0127 21:30:01.742998 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv" event={"ID":"e8347701-8062-418c-9ff1-9b7a05a3509e","Type":"ContainerStarted","Data":"b9a5c2012313f6c0ec6ebdcc50c2bbac95159a308791c359a11c2e0f3e1980c9"}
Jan 27 21:30:01 crc kubenswrapper[4793]: I0127 21:30:01.768226 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv" podStartSLOduration=1.7682053899999999 podStartE2EDuration="1.76820539s" podCreationTimestamp="2026-01-27 21:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 21:30:01.765500455 +0000 UTC m=+5227.155753611" watchObservedRunningTime="2026-01-27 21:30:01.76820539 +0000 UTC m=+5227.158458546"
Jan 27 21:30:02 crc kubenswrapper[4793]: I0127 21:30:02.754189 4793 generic.go:334] "Generic (PLEG): container finished" podID="e8347701-8062-418c-9ff1-9b7a05a3509e" containerID="1736be7a798a3618bf76683fb4f57755fc764015e2846571c7e9f6aba88410e1" exitCode=0
Jan 27 21:30:02 crc kubenswrapper[4793]: I0127 21:30:02.754270 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv" event={"ID":"e8347701-8062-418c-9ff1-9b7a05a3509e","Type":"ContainerDied","Data":"1736be7a798a3618bf76683fb4f57755fc764015e2846571c7e9f6aba88410e1"}
Jan 27 21:30:04 crc kubenswrapper[4793]: I0127 21:30:04.288287 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"
Jan 27 21:30:04 crc kubenswrapper[4793]: I0127 21:30:04.297537 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e8347701-8062-418c-9ff1-9b7a05a3509e-secret-volume\") pod \"e8347701-8062-418c-9ff1-9b7a05a3509e\" (UID: \"e8347701-8062-418c-9ff1-9b7a05a3509e\") "
Jan 27 21:30:04 crc kubenswrapper[4793]: I0127 21:30:04.297748 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e8347701-8062-418c-9ff1-9b7a05a3509e-config-volume\") pod \"e8347701-8062-418c-9ff1-9b7a05a3509e\" (UID: \"e8347701-8062-418c-9ff1-9b7a05a3509e\") "
Jan 27 21:30:04 crc kubenswrapper[4793]: I0127 21:30:04.297863 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9nslj\" (UniqueName: \"kubernetes.io/projected/e8347701-8062-418c-9ff1-9b7a05a3509e-kube-api-access-9nslj\") pod \"e8347701-8062-418c-9ff1-9b7a05a3509e\" (UID: \"e8347701-8062-418c-9ff1-9b7a05a3509e\") "
Jan 27 21:30:04 crc kubenswrapper[4793]: I0127 21:30:04.298653 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8347701-8062-418c-9ff1-9b7a05a3509e-config-volume" (OuterVolumeSpecName: "config-volume") pod "e8347701-8062-418c-9ff1-9b7a05a3509e" (UID: "e8347701-8062-418c-9ff1-9b7a05a3509e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 21:30:04 crc kubenswrapper[4793]: I0127 21:30:04.305156 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8347701-8062-418c-9ff1-9b7a05a3509e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e8347701-8062-418c-9ff1-9b7a05a3509e" (UID: "e8347701-8062-418c-9ff1-9b7a05a3509e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 21:30:04 crc kubenswrapper[4793]: I0127 21:30:04.307030 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8347701-8062-418c-9ff1-9b7a05a3509e-kube-api-access-9nslj" (OuterVolumeSpecName: "kube-api-access-9nslj") pod "e8347701-8062-418c-9ff1-9b7a05a3509e" (UID: "e8347701-8062-418c-9ff1-9b7a05a3509e"). InnerVolumeSpecName "kube-api-access-9nslj".
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:30:04 crc kubenswrapper[4793]: I0127 21:30:04.399790 4793 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e8347701-8062-418c-9ff1-9b7a05a3509e-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 21:30:04 crc kubenswrapper[4793]: I0127 21:30:04.399822 4793 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e8347701-8062-418c-9ff1-9b7a05a3509e-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 21:30:04 crc kubenswrapper[4793]: I0127 21:30:04.399834 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9nslj\" (UniqueName: \"kubernetes.io/projected/e8347701-8062-418c-9ff1-9b7a05a3509e-kube-api-access-9nslj\") on node \"crc\" DevicePath \"\"" Jan 27 21:30:04 crc kubenswrapper[4793]: I0127 21:30:04.780391 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv" event={"ID":"e8347701-8062-418c-9ff1-9b7a05a3509e","Type":"ContainerDied","Data":"b9a5c2012313f6c0ec6ebdcc50c2bbac95159a308791c359a11c2e0f3e1980c9"} Jan 27 21:30:04 crc kubenswrapper[4793]: I0127 21:30:04.780446 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9a5c2012313f6c0ec6ebdcc50c2bbac95159a308791c359a11c2e0f3e1980c9" Jan 27 21:30:04 crc kubenswrapper[4793]: I0127 21:30:04.780468 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv" Jan 27 21:30:04 crc kubenswrapper[4793]: I0127 21:30:04.860941 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk"] Jan 27 21:30:04 crc kubenswrapper[4793]: I0127 21:30:04.870471 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492445-gnchk"] Jan 27 21:30:05 crc kubenswrapper[4793]: I0127 21:30:05.812037 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:30:05 crc kubenswrapper[4793]: E0127 21:30:05.814108 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:30:05 crc kubenswrapper[4793]: I0127 21:30:05.824763 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec45dbcf-8423-4c64-b5b0-2a84839af548" path="/var/lib/kubelet/pods/ec45dbcf-8423-4c64-b5b0-2a84839af548/volumes" Jan 27 21:30:19 crc kubenswrapper[4793]: I0127 21:30:19.804236 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:30:19 crc kubenswrapper[4793]: E0127 21:30:19.805208 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:30:22 crc kubenswrapper[4793]: I0127 21:30:22.754366 4793 
patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:30:22 crc kubenswrapper[4793]: I0127 21:30:22.754735 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:30:33 crc kubenswrapper[4793]: I0127 21:30:33.804350 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:30:33 crc kubenswrapper[4793]: E0127 21:30:33.805395 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:30:36 crc kubenswrapper[4793]: I0127 21:30:36.658972 4793 scope.go:117] "RemoveContainer" containerID="9970dc4f78c2d2ba4a97756179dc64435951717ad08568b6c2665e9239dfe5b5" Jan 27 21:30:36 crc kubenswrapper[4793]: I0127 21:30:36.720310 4793 scope.go:117] "RemoveContainer" containerID="c710cbd0c86fc871bfbc65c4ff12064e33cd481360c7ac01c88c14ed63064520" Jan 27 21:30:36 crc kubenswrapper[4793]: I0127 21:30:36.759889 4793 scope.go:117] "RemoveContainer" containerID="2d7114e9932dc3f36e5e66f5002f588078a2aebb67622cdad1534d36532cb8ce" Jan 27 21:30:36 crc kubenswrapper[4793]: I0127 21:30:36.786497 4793 scope.go:117] "RemoveContainer" containerID="a8c0a5dda825bab7f2525db5e510a8f6dabaddf3aec1ff224db88236a63fdf0c" Jan 27 21:30:44 crc kubenswrapper[4793]: I0127 21:30:44.803317 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:30:44 crc kubenswrapper[4793]: E0127 21:30:44.804150 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:30:52 crc kubenswrapper[4793]: I0127 21:30:52.754030 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:30:52 crc kubenswrapper[4793]: I0127 21:30:52.754643 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:30:52 crc kubenswrapper[4793]: I0127 21:30:52.754693 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 21:30:52 crc 
kubenswrapper[4793]: I0127 21:30:52.755719 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 21:30:52 crc kubenswrapper[4793]: I0127 21:30:52.755780 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" gracePeriod=600 Jan 27 21:30:52 crc kubenswrapper[4793]: E0127 21:30:52.881011 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:30:53 crc kubenswrapper[4793]: I0127 21:30:53.291632 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" exitCode=0 Jan 27 21:30:53 crc kubenswrapper[4793]: I0127 21:30:53.291691 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569"} Jan 27 21:30:53 crc kubenswrapper[4793]: I0127 21:30:53.291744 4793 scope.go:117] "RemoveContainer" containerID="805a666f98506e4c93d143d9f1a985ffcc479cdb8fb96f76713774c093745637" Jan 27 21:30:53 crc kubenswrapper[4793]: I0127 21:30:53.292921 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:30:53 crc kubenswrapper[4793]: E0127 21:30:53.293583 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:30:55 crc kubenswrapper[4793]: I0127 21:30:55.820330 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:30:55 crc kubenswrapper[4793]: E0127 21:30:55.821234 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:31:04 crc kubenswrapper[4793]: I0127 21:31:04.803981 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:31:04 crc kubenswrapper[4793]: 
E0127 21:31:04.805160 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:31:09 crc kubenswrapper[4793]: I0127 21:31:09.804184 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:31:09 crc kubenswrapper[4793]: E0127 21:31:09.805004 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:31:15 crc kubenswrapper[4793]: I0127 21:31:15.809858 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:31:15 crc kubenswrapper[4793]: E0127 21:31:15.810851 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:31:22 crc kubenswrapper[4793]: I0127 21:31:22.803628 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:31:22 crc kubenswrapper[4793]: E0127 21:31:22.804753 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:31:29 crc kubenswrapper[4793]: I0127 21:31:29.804014 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:31:29 crc kubenswrapper[4793]: E0127 21:31:29.804783 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:31:33 crc kubenswrapper[4793]: I0127 21:31:33.803860 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:31:33 crc kubenswrapper[4793]: E0127 21:31:33.804627 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" 
podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:31:41 crc kubenswrapper[4793]: I0127 21:31:41.803805 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:31:41 crc kubenswrapper[4793]: E0127 21:31:41.804626 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:31:45 crc kubenswrapper[4793]: I0127 21:31:45.811578 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:31:45 crc kubenswrapper[4793]: E0127 21:31:45.812525 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:31:54 crc kubenswrapper[4793]: I0127 21:31:54.803913 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:31:54 crc kubenswrapper[4793]: E0127 21:31:54.804722 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:31:56 crc kubenswrapper[4793]: I0127 21:31:56.803255 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:31:56 crc kubenswrapper[4793]: E0127 21:31:56.803776 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:32:08 crc kubenswrapper[4793]: I0127 21:32:08.804084 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:32:08 crc kubenswrapper[4793]: E0127 21:32:08.805242 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:32:09 crc kubenswrapper[4793]: I0127 21:32:09.803974 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:32:09 crc kubenswrapper[4793]: E0127 21:32:09.804754 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:32:20 crc kubenswrapper[4793]: I0127 21:32:20.860129 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:32:20 crc kubenswrapper[4793]: E0127 21:32:20.861039 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:32:21 crc kubenswrapper[4793]: I0127 21:32:21.803626 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:32:21 crc kubenswrapper[4793]: E0127 21:32:21.803903 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:32:31 crc kubenswrapper[4793]: I0127 21:32:31.803387 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:32:31 crc kubenswrapper[4793]: E0127 21:32:31.804342 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:32:36 crc kubenswrapper[4793]: I0127 21:32:36.804633 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:32:36 crc kubenswrapper[4793]: E0127 21:32:36.805619 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:32:42 crc kubenswrapper[4793]: I0127 21:32:42.804777 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:32:42 crc kubenswrapper[4793]: E0127 21:32:42.807441 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:32:47 crc kubenswrapper[4793]: I0127 21:32:47.807610 4793 
scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:32:47 crc kubenswrapper[4793]: E0127 21:32:47.808322 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:32:53 crc kubenswrapper[4793]: I0127 21:32:53.803167 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:32:53 crc kubenswrapper[4793]: E0127 21:32:53.804071 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:33:00 crc kubenswrapper[4793]: I0127 21:33:00.803813 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:33:00 crc kubenswrapper[4793]: E0127 21:33:00.804639 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:33:07 crc kubenswrapper[4793]: I0127 21:33:07.804303 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:33:07 crc kubenswrapper[4793]: E0127 21:33:07.805252 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:33:14 crc kubenswrapper[4793]: I0127 21:33:14.803578 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:33:14 crc kubenswrapper[4793]: E0127 21:33:14.804309 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:33:22 crc kubenswrapper[4793]: I0127 21:33:22.804260 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:33:22 crc kubenswrapper[4793]: E0127 21:33:22.805192 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:33:28 crc kubenswrapper[4793]: I0127 21:33:28.803716 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:33:28 crc kubenswrapper[4793]: E0127 21:33:28.804504 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:33:35 crc kubenswrapper[4793]: I0127 21:33:35.823195 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:33:35 crc kubenswrapper[4793]: E0127 21:33:35.823876 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:33:43 crc kubenswrapper[4793]: I0127 21:33:43.804251 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:33:43 crc kubenswrapper[4793]: E0127 21:33:43.805297 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:33:46 crc kubenswrapper[4793]: I0127 21:33:46.804038 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:33:46 crc kubenswrapper[4793]: E0127 21:33:46.805145 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:33:58 crc kubenswrapper[4793]: I0127 21:33:58.803929 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:33:58 crc kubenswrapper[4793]: E0127 21:33:58.804720 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:33:59 crc kubenswrapper[4793]: I0127 21:33:59.804332 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:33:59 crc 
kubenswrapper[4793]: E0127 21:33:59.805178 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:34:10 crc kubenswrapper[4793]: I0127 21:34:10.804283 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:34:10 crc kubenswrapper[4793]: E0127 21:34:10.805172 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:34:13 crc kubenswrapper[4793]: I0127 21:34:13.804005 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:34:13 crc kubenswrapper[4793]: E0127 21:34:13.805010 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:34:15 crc kubenswrapper[4793]: I0127 21:34:15.579104 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rh7h2"] Jan 27 21:34:15 crc kubenswrapper[4793]: E0127 21:34:15.580049 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8347701-8062-418c-9ff1-9b7a05a3509e" containerName="collect-profiles" Jan 27 21:34:15 crc kubenswrapper[4793]: I0127 21:34:15.580070 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8347701-8062-418c-9ff1-9b7a05a3509e" containerName="collect-profiles" Jan 27 21:34:15 crc kubenswrapper[4793]: I0127 21:34:15.580325 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8347701-8062-418c-9ff1-9b7a05a3509e" containerName="collect-profiles" Jan 27 21:34:15 crc kubenswrapper[4793]: I0127 21:34:15.582752 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:15 crc kubenswrapper[4793]: I0127 21:34:15.612545 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rh7h2"] Jan 27 21:34:15 crc kubenswrapper[4793]: I0127 21:34:15.772597 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rdpk\" (UniqueName: \"kubernetes.io/projected/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-kube-api-access-2rdpk\") pod \"redhat-marketplace-rh7h2\" (UID: \"8e21de91-0807-4016-b5e5-a7b75ef4f2c1\") " pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:15 crc kubenswrapper[4793]: I0127 21:34:15.772751 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-utilities\") pod \"redhat-marketplace-rh7h2\" (UID: \"8e21de91-0807-4016-b5e5-a7b75ef4f2c1\") " pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:15 crc kubenswrapper[4793]: I0127 21:34:15.772863 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-catalog-content\") pod \"redhat-marketplace-rh7h2\" (UID: \"8e21de91-0807-4016-b5e5-a7b75ef4f2c1\") " pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:15 crc kubenswrapper[4793]: I0127 21:34:15.878892 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rdpk\" (UniqueName: \"kubernetes.io/projected/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-kube-api-access-2rdpk\") pod \"redhat-marketplace-rh7h2\" (UID: \"8e21de91-0807-4016-b5e5-a7b75ef4f2c1\") " pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:15 crc kubenswrapper[4793]: I0127 21:34:15.879040 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-utilities\") pod \"redhat-marketplace-rh7h2\" (UID: \"8e21de91-0807-4016-b5e5-a7b75ef4f2c1\") " pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:15 crc kubenswrapper[4793]: I0127 21:34:15.879834 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-utilities\") pod \"redhat-marketplace-rh7h2\" (UID: \"8e21de91-0807-4016-b5e5-a7b75ef4f2c1\") " pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:15 crc kubenswrapper[4793]: I0127 21:34:15.880286 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-catalog-content\") pod \"redhat-marketplace-rh7h2\" (UID: \"8e21de91-0807-4016-b5e5-a7b75ef4f2c1\") " pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:15 crc kubenswrapper[4793]: I0127 21:34:15.880882 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-catalog-content\") pod \"redhat-marketplace-rh7h2\" (UID: \"8e21de91-0807-4016-b5e5-a7b75ef4f2c1\") " pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:15 crc kubenswrapper[4793]: I0127 21:34:15.911701 4793 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-2rdpk\" (UniqueName: \"kubernetes.io/projected/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-kube-api-access-2rdpk\") pod \"redhat-marketplace-rh7h2\" (UID: \"8e21de91-0807-4016-b5e5-a7b75ef4f2c1\") " pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:15 crc kubenswrapper[4793]: I0127 21:34:15.912621 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:16 crc kubenswrapper[4793]: I0127 21:34:16.451337 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rh7h2"] Jan 27 21:34:17 crc kubenswrapper[4793]: I0127 21:34:17.344988 4793 generic.go:334] "Generic (PLEG): container finished" podID="8e21de91-0807-4016-b5e5-a7b75ef4f2c1" containerID="a98d3332e820a32becac4dd78271da3bb9778abd9e5b3c95315f9d51e6b29873" exitCode=0 Jan 27 21:34:17 crc kubenswrapper[4793]: I0127 21:34:17.345232 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rh7h2" event={"ID":"8e21de91-0807-4016-b5e5-a7b75ef4f2c1","Type":"ContainerDied","Data":"a98d3332e820a32becac4dd78271da3bb9778abd9e5b3c95315f9d51e6b29873"} Jan 27 21:34:17 crc kubenswrapper[4793]: I0127 21:34:17.345259 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rh7h2" event={"ID":"8e21de91-0807-4016-b5e5-a7b75ef4f2c1","Type":"ContainerStarted","Data":"e5231c3fdf893e85fb388854670dac495a7f40cd8fe36d7ef67a4aa7c9d4a7a1"} Jan 27 21:34:17 crc kubenswrapper[4793]: I0127 21:34:17.354460 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 21:34:18 crc kubenswrapper[4793]: I0127 21:34:18.358277 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rh7h2" event={"ID":"8e21de91-0807-4016-b5e5-a7b75ef4f2c1","Type":"ContainerStarted","Data":"62035dc44b10c65225e8cfa6e809e75bce2d41c07e289e16d0d83927728047e5"} Jan 27 21:34:19 crc kubenswrapper[4793]: I0127 21:34:19.371615 4793 generic.go:334] "Generic (PLEG): container finished" podID="8e21de91-0807-4016-b5e5-a7b75ef4f2c1" containerID="62035dc44b10c65225e8cfa6e809e75bce2d41c07e289e16d0d83927728047e5" exitCode=0 Jan 27 21:34:19 crc kubenswrapper[4793]: I0127 21:34:19.371749 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rh7h2" event={"ID":"8e21de91-0807-4016-b5e5-a7b75ef4f2c1","Type":"ContainerDied","Data":"62035dc44b10c65225e8cfa6e809e75bce2d41c07e289e16d0d83927728047e5"} Jan 27 21:34:20 crc kubenswrapper[4793]: I0127 21:34:20.386097 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rh7h2" event={"ID":"8e21de91-0807-4016-b5e5-a7b75ef4f2c1","Type":"ContainerStarted","Data":"e785f1b1c77e6ed9107b8b9f769ab032514d64c2850bbf1fdd3a97acbbe0cb52"} Jan 27 21:34:20 crc kubenswrapper[4793]: I0127 21:34:20.411628 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rh7h2" podStartSLOduration=2.974566145 podStartE2EDuration="5.411611801s" podCreationTimestamp="2026-01-27 21:34:15 +0000 UTC" firstStartedPulling="2026-01-27 21:34:17.354208927 +0000 UTC m=+5482.744462083" lastFinishedPulling="2026-01-27 21:34:19.791254583 +0000 UTC m=+5485.181507739" observedRunningTime="2026-01-27 21:34:20.406803346 +0000 UTC m=+5485.797056502" watchObservedRunningTime="2026-01-27 21:34:20.411611801 +0000 UTC 
m=+5485.801864947" Jan 27 21:34:22 crc kubenswrapper[4793]: I0127 21:34:22.804254 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:34:22 crc kubenswrapper[4793]: E0127 21:34:22.804891 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:34:22 crc kubenswrapper[4793]: I0127 21:34:22.973919 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vwj8j"] Jan 27 21:34:22 crc kubenswrapper[4793]: I0127 21:34:22.976708 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:23 crc kubenswrapper[4793]: I0127 21:34:23.011823 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vwj8j"] Jan 27 21:34:23 crc kubenswrapper[4793]: I0127 21:34:23.064662 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9940139-05b8-4361-a60d-b9675e25de6d-catalog-content\") pod \"redhat-operators-vwj8j\" (UID: \"e9940139-05b8-4361-a60d-b9675e25de6d\") " pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:23 crc kubenswrapper[4793]: I0127 21:34:23.064864 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvklz\" (UniqueName: \"kubernetes.io/projected/e9940139-05b8-4361-a60d-b9675e25de6d-kube-api-access-lvklz\") pod \"redhat-operators-vwj8j\" (UID: \"e9940139-05b8-4361-a60d-b9675e25de6d\") " pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:23 crc kubenswrapper[4793]: I0127 21:34:23.065143 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9940139-05b8-4361-a60d-b9675e25de6d-utilities\") pod \"redhat-operators-vwj8j\" (UID: \"e9940139-05b8-4361-a60d-b9675e25de6d\") " pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:23 crc kubenswrapper[4793]: I0127 21:34:23.167752 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9940139-05b8-4361-a60d-b9675e25de6d-utilities\") pod \"redhat-operators-vwj8j\" (UID: \"e9940139-05b8-4361-a60d-b9675e25de6d\") " pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:23 crc kubenswrapper[4793]: I0127 21:34:23.167875 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9940139-05b8-4361-a60d-b9675e25de6d-catalog-content\") pod \"redhat-operators-vwj8j\" (UID: \"e9940139-05b8-4361-a60d-b9675e25de6d\") " pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:23 crc kubenswrapper[4793]: I0127 21:34:23.167923 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvklz\" (UniqueName: \"kubernetes.io/projected/e9940139-05b8-4361-a60d-b9675e25de6d-kube-api-access-lvklz\") pod \"redhat-operators-vwj8j\" (UID: 
\"e9940139-05b8-4361-a60d-b9675e25de6d\") " pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:23 crc kubenswrapper[4793]: I0127 21:34:23.168416 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9940139-05b8-4361-a60d-b9675e25de6d-utilities\") pod \"redhat-operators-vwj8j\" (UID: \"e9940139-05b8-4361-a60d-b9675e25de6d\") " pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:23 crc kubenswrapper[4793]: I0127 21:34:23.168478 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9940139-05b8-4361-a60d-b9675e25de6d-catalog-content\") pod \"redhat-operators-vwj8j\" (UID: \"e9940139-05b8-4361-a60d-b9675e25de6d\") " pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:23 crc kubenswrapper[4793]: I0127 21:34:23.429123 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvklz\" (UniqueName: \"kubernetes.io/projected/e9940139-05b8-4361-a60d-b9675e25de6d-kube-api-access-lvklz\") pod \"redhat-operators-vwj8j\" (UID: \"e9940139-05b8-4361-a60d-b9675e25de6d\") " pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:23 crc kubenswrapper[4793]: I0127 21:34:23.608309 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:24 crc kubenswrapper[4793]: I0127 21:34:24.204627 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vwj8j"] Jan 27 21:34:24 crc kubenswrapper[4793]: I0127 21:34:24.426568 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwj8j" event={"ID":"e9940139-05b8-4361-a60d-b9675e25de6d","Type":"ContainerStarted","Data":"96a2bfc9df1cdd012b56c3961d3d414adcd620a240c21e3f8cb53e22b1e28b87"} Jan 27 21:34:25 crc kubenswrapper[4793]: I0127 21:34:25.440378 4793 generic.go:334] "Generic (PLEG): container finished" podID="e9940139-05b8-4361-a60d-b9675e25de6d" containerID="ecafb7f701a4bb49a939b9197816670423f0c423dfd1f652625b23a56a5c48f3" exitCode=0 Jan 27 21:34:25 crc kubenswrapper[4793]: I0127 21:34:25.440525 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwj8j" event={"ID":"e9940139-05b8-4361-a60d-b9675e25de6d","Type":"ContainerDied","Data":"ecafb7f701a4bb49a939b9197816670423f0c423dfd1f652625b23a56a5c48f3"} Jan 27 21:34:25 crc kubenswrapper[4793]: I0127 21:34:25.818920 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:34:25 crc kubenswrapper[4793]: E0127 21:34:25.819491 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:34:25 crc kubenswrapper[4793]: I0127 21:34:25.914593 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:25 crc kubenswrapper[4793]: I0127 21:34:25.914661 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:25 crc kubenswrapper[4793]: I0127 21:34:25.972052 4793 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:26 crc kubenswrapper[4793]: I0127 21:34:26.452413 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwj8j" event={"ID":"e9940139-05b8-4361-a60d-b9675e25de6d","Type":"ContainerStarted","Data":"2b438e9cb9e64ec1b6ca78f830b1a2b7656859b86e92b45903cf5aa0377640e7"} Jan 27 21:34:26 crc kubenswrapper[4793]: I0127 21:34:26.503767 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:28 crc kubenswrapper[4793]: I0127 21:34:28.355705 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rh7h2"] Jan 27 21:34:28 crc kubenswrapper[4793]: I0127 21:34:28.473502 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rh7h2" podUID="8e21de91-0807-4016-b5e5-a7b75ef4f2c1" containerName="registry-server" containerID="cri-o://e785f1b1c77e6ed9107b8b9f769ab032514d64c2850bbf1fdd3a97acbbe0cb52" gracePeriod=2 Jan 27 21:34:29 crc kubenswrapper[4793]: I0127 21:34:29.490480 4793 generic.go:334] "Generic (PLEG): container finished" podID="8e21de91-0807-4016-b5e5-a7b75ef4f2c1" containerID="e785f1b1c77e6ed9107b8b9f769ab032514d64c2850bbf1fdd3a97acbbe0cb52" exitCode=0 Jan 27 21:34:29 crc kubenswrapper[4793]: I0127 21:34:29.490627 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rh7h2" event={"ID":"8e21de91-0807-4016-b5e5-a7b75ef4f2c1","Type":"ContainerDied","Data":"e785f1b1c77e6ed9107b8b9f769ab032514d64c2850bbf1fdd3a97acbbe0cb52"} Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.186680 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.237594 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-utilities\") pod \"8e21de91-0807-4016-b5e5-a7b75ef4f2c1\" (UID: \"8e21de91-0807-4016-b5e5-a7b75ef4f2c1\") " Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.237682 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-catalog-content\") pod \"8e21de91-0807-4016-b5e5-a7b75ef4f2c1\" (UID: \"8e21de91-0807-4016-b5e5-a7b75ef4f2c1\") " Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.237786 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rdpk\" (UniqueName: \"kubernetes.io/projected/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-kube-api-access-2rdpk\") pod \"8e21de91-0807-4016-b5e5-a7b75ef4f2c1\" (UID: \"8e21de91-0807-4016-b5e5-a7b75ef4f2c1\") " Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.238881 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-utilities" (OuterVolumeSpecName: "utilities") pod "8e21de91-0807-4016-b5e5-a7b75ef4f2c1" (UID: "8e21de91-0807-4016-b5e5-a7b75ef4f2c1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.244242 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-kube-api-access-2rdpk" (OuterVolumeSpecName: "kube-api-access-2rdpk") pod "8e21de91-0807-4016-b5e5-a7b75ef4f2c1" (UID: "8e21de91-0807-4016-b5e5-a7b75ef4f2c1"). InnerVolumeSpecName "kube-api-access-2rdpk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.257293 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8e21de91-0807-4016-b5e5-a7b75ef4f2c1" (UID: "8e21de91-0807-4016-b5e5-a7b75ef4f2c1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.340775 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.340825 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.340843 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rdpk\" (UniqueName: \"kubernetes.io/projected/8e21de91-0807-4016-b5e5-a7b75ef4f2c1-kube-api-access-2rdpk\") on node \"crc\" DevicePath \"\"" Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.504432 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rh7h2" event={"ID":"8e21de91-0807-4016-b5e5-a7b75ef4f2c1","Type":"ContainerDied","Data":"e5231c3fdf893e85fb388854670dac495a7f40cd8fe36d7ef67a4aa7c9d4a7a1"} Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.504501 4793 scope.go:117] "RemoveContainer" containerID="e785f1b1c77e6ed9107b8b9f769ab032514d64c2850bbf1fdd3a97acbbe0cb52" Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.504621 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rh7h2" Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.539241 4793 scope.go:117] "RemoveContainer" containerID="62035dc44b10c65225e8cfa6e809e75bce2d41c07e289e16d0d83927728047e5" Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.563446 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rh7h2"] Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.567178 4793 scope.go:117] "RemoveContainer" containerID="a98d3332e820a32becac4dd78271da3bb9778abd9e5b3c95315f9d51e6b29873" Jan 27 21:34:30 crc kubenswrapper[4793]: I0127 21:34:30.579041 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rh7h2"] Jan 27 21:34:31 crc kubenswrapper[4793]: I0127 21:34:31.818022 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e21de91-0807-4016-b5e5-a7b75ef4f2c1" path="/var/lib/kubelet/pods/8e21de91-0807-4016-b5e5-a7b75ef4f2c1/volumes" Jan 27 21:34:33 crc kubenswrapper[4793]: I0127 21:34:33.544096 4793 generic.go:334] "Generic (PLEG): container finished" podID="e9940139-05b8-4361-a60d-b9675e25de6d" containerID="2b438e9cb9e64ec1b6ca78f830b1a2b7656859b86e92b45903cf5aa0377640e7" exitCode=0 Jan 27 21:34:33 crc kubenswrapper[4793]: I0127 21:34:33.544174 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwj8j" event={"ID":"e9940139-05b8-4361-a60d-b9675e25de6d","Type":"ContainerDied","Data":"2b438e9cb9e64ec1b6ca78f830b1a2b7656859b86e92b45903cf5aa0377640e7"} Jan 27 21:34:35 crc kubenswrapper[4793]: I0127 21:34:35.583642 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwj8j" event={"ID":"e9940139-05b8-4361-a60d-b9675e25de6d","Type":"ContainerStarted","Data":"bbb05aa4dd4e7f932b3ff340d8196b53123c69f4a8c2c854e359f81ad2d377cc"} Jan 27 21:34:35 crc kubenswrapper[4793]: I0127 21:34:35.625866 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vwj8j" podStartSLOduration=5.085571198 podStartE2EDuration="13.625834487s" podCreationTimestamp="2026-01-27 21:34:22 +0000 UTC" firstStartedPulling="2026-01-27 21:34:25.443377359 +0000 UTC m=+5490.833630515" lastFinishedPulling="2026-01-27 21:34:33.983640648 +0000 UTC m=+5499.373893804" observedRunningTime="2026-01-27 21:34:35.607909446 +0000 UTC m=+5500.998162622" watchObservedRunningTime="2026-01-27 21:34:35.625834487 +0000 UTC m=+5501.016087683" Jan 27 21:34:35 crc kubenswrapper[4793]: I0127 21:34:35.805033 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:34:35 crc kubenswrapper[4793]: E0127 21:34:35.805415 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:34:38 crc kubenswrapper[4793]: I0127 21:34:38.803727 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:34:39 crc kubenswrapper[4793]: I0127 21:34:39.625387 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3"} Jan 27 21:34:42 crc kubenswrapper[4793]: I0127 21:34:42.660346 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" exitCode=1 Jan 27 21:34:42 crc kubenswrapper[4793]: I0127 21:34:42.660582 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3"} Jan 27 21:34:42 crc kubenswrapper[4793]: I0127 21:34:42.660782 4793 scope.go:117] "RemoveContainer" containerID="216dbb00fd0ac0febcd701ce1d56153cf20476f27a8e0f5f0502ef9bab237009" Jan 27 21:34:42 crc kubenswrapper[4793]: I0127 21:34:42.661839 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:34:42 crc kubenswrapper[4793]: E0127 21:34:42.662312 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:34:43 crc kubenswrapper[4793]: I0127 21:34:43.242951 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 21:34:43 crc kubenswrapper[4793]: I0127 21:34:43.608738 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:43 crc kubenswrapper[4793]: I0127 21:34:43.608796 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:43 crc kubenswrapper[4793]: I0127 21:34:43.675165 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:34:43 crc kubenswrapper[4793]: E0127 21:34:43.677862 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:34:44 crc kubenswrapper[4793]: I0127 21:34:44.573072 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:44 crc kubenswrapper[4793]: I0127 21:34:44.654731 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:44 crc kubenswrapper[4793]: I0127 21:34:44.844246 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vwj8j"] Jan 27 21:34:45 crc kubenswrapper[4793]: I0127 21:34:45.705789 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vwj8j" podUID="e9940139-05b8-4361-a60d-b9675e25de6d" containerName="registry-server" containerID="cri-o://bbb05aa4dd4e7f932b3ff340d8196b53123c69f4a8c2c854e359f81ad2d377cc" 
gracePeriod=2 Jan 27 21:34:46 crc kubenswrapper[4793]: I0127 21:34:46.722860 4793 generic.go:334] "Generic (PLEG): container finished" podID="e9940139-05b8-4361-a60d-b9675e25de6d" containerID="bbb05aa4dd4e7f932b3ff340d8196b53123c69f4a8c2c854e359f81ad2d377cc" exitCode=0 Jan 27 21:34:46 crc kubenswrapper[4793]: I0127 21:34:46.722948 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwj8j" event={"ID":"e9940139-05b8-4361-a60d-b9675e25de6d","Type":"ContainerDied","Data":"bbb05aa4dd4e7f932b3ff340d8196b53123c69f4a8c2c854e359f81ad2d377cc"} Jan 27 21:34:46 crc kubenswrapper[4793]: I0127 21:34:46.722993 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwj8j" event={"ID":"e9940139-05b8-4361-a60d-b9675e25de6d","Type":"ContainerDied","Data":"96a2bfc9df1cdd012b56c3961d3d414adcd620a240c21e3f8cb53e22b1e28b87"} Jan 27 21:34:46 crc kubenswrapper[4793]: I0127 21:34:46.723014 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="96a2bfc9df1cdd012b56c3961d3d414adcd620a240c21e3f8cb53e22b1e28b87" Jan 27 21:34:47 crc kubenswrapper[4793]: I0127 21:34:47.020017 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:47 crc kubenswrapper[4793]: I0127 21:34:47.129510 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9940139-05b8-4361-a60d-b9675e25de6d-utilities\") pod \"e9940139-05b8-4361-a60d-b9675e25de6d\" (UID: \"e9940139-05b8-4361-a60d-b9675e25de6d\") " Jan 27 21:34:47 crc kubenswrapper[4793]: I0127 21:34:47.129617 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9940139-05b8-4361-a60d-b9675e25de6d-catalog-content\") pod \"e9940139-05b8-4361-a60d-b9675e25de6d\" (UID: \"e9940139-05b8-4361-a60d-b9675e25de6d\") " Jan 27 21:34:47 crc kubenswrapper[4793]: I0127 21:34:47.129823 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lvklz\" (UniqueName: \"kubernetes.io/projected/e9940139-05b8-4361-a60d-b9675e25de6d-kube-api-access-lvklz\") pod \"e9940139-05b8-4361-a60d-b9675e25de6d\" (UID: \"e9940139-05b8-4361-a60d-b9675e25de6d\") " Jan 27 21:34:47 crc kubenswrapper[4793]: I0127 21:34:47.130592 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9940139-05b8-4361-a60d-b9675e25de6d-utilities" (OuterVolumeSpecName: "utilities") pod "e9940139-05b8-4361-a60d-b9675e25de6d" (UID: "e9940139-05b8-4361-a60d-b9675e25de6d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:34:47 crc kubenswrapper[4793]: I0127 21:34:47.131083 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9940139-05b8-4361-a60d-b9675e25de6d-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 21:34:47 crc kubenswrapper[4793]: I0127 21:34:47.150893 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9940139-05b8-4361-a60d-b9675e25de6d-kube-api-access-lvklz" (OuterVolumeSpecName: "kube-api-access-lvklz") pod "e9940139-05b8-4361-a60d-b9675e25de6d" (UID: "e9940139-05b8-4361-a60d-b9675e25de6d"). InnerVolumeSpecName "kube-api-access-lvklz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:34:47 crc kubenswrapper[4793]: I0127 21:34:47.232928 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lvklz\" (UniqueName: \"kubernetes.io/projected/e9940139-05b8-4361-a60d-b9675e25de6d-kube-api-access-lvklz\") on node \"crc\" DevicePath \"\"" Jan 27 21:34:47 crc kubenswrapper[4793]: I0127 21:34:47.258023 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9940139-05b8-4361-a60d-b9675e25de6d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e9940139-05b8-4361-a60d-b9675e25de6d" (UID: "e9940139-05b8-4361-a60d-b9675e25de6d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:34:47 crc kubenswrapper[4793]: I0127 21:34:47.334503 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9940139-05b8-4361-a60d-b9675e25de6d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 21:34:47 crc kubenswrapper[4793]: I0127 21:34:47.730495 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vwj8j" Jan 27 21:34:47 crc kubenswrapper[4793]: I0127 21:34:47.791778 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vwj8j"] Jan 27 21:34:47 crc kubenswrapper[4793]: I0127 21:34:47.818429 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vwj8j"] Jan 27 21:34:48 crc kubenswrapper[4793]: I0127 21:34:48.242956 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:34:48 crc kubenswrapper[4793]: I0127 21:34:48.243280 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:34:48 crc kubenswrapper[4793]: I0127 21:34:48.243364 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:34:48 crc kubenswrapper[4793]: I0127 21:34:48.244045 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:34:48 crc kubenswrapper[4793]: E0127 21:34:48.244363 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:34:48 crc kubenswrapper[4793]: I0127 21:34:48.839781 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:34:48 crc kubenswrapper[4793]: E0127 21:34:48.840402 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:34:49 crc kubenswrapper[4793]: I0127 21:34:49.816355 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9940139-05b8-4361-a60d-b9675e25de6d" path="/var/lib/kubelet/pods/e9940139-05b8-4361-a60d-b9675e25de6d/volumes" Jan 27 
21:34:50 crc kubenswrapper[4793]: I0127 21:34:50.804352 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:34:50 crc kubenswrapper[4793]: E0127 21:34:50.805368 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:34:59 crc kubenswrapper[4793]: I0127 21:34:59.803988 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:34:59 crc kubenswrapper[4793]: E0127 21:34:59.804844 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:35:03 crc kubenswrapper[4793]: I0127 21:35:03.804082 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:35:03 crc kubenswrapper[4793]: E0127 21:35:03.806021 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:35:10 crc kubenswrapper[4793]: I0127 21:35:10.803870 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:35:10 crc kubenswrapper[4793]: E0127 21:35:10.806712 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:35:16 crc kubenswrapper[4793]: I0127 21:35:16.803820 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:35:16 crc kubenswrapper[4793]: E0127 21:35:16.804808 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:35:24 crc kubenswrapper[4793]: I0127 21:35:24.804275 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:35:24 crc kubenswrapper[4793]: E0127 21:35:24.805224 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:35:27 crc kubenswrapper[4793]: I0127 21:35:27.804116 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:35:27 crc kubenswrapper[4793]: E0127 21:35:27.804856 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:35:38 crc kubenswrapper[4793]: I0127 21:35:38.804366 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:35:38 crc kubenswrapper[4793]: E0127 21:35:38.805428 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:35:41 crc kubenswrapper[4793]: I0127 21:35:41.803759 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:35:41 crc kubenswrapper[4793]: E0127 21:35:41.804663 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:35:52 crc kubenswrapper[4793]: I0127 21:35:52.804145 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:35:52 crc kubenswrapper[4793]: E0127 21:35:52.805420 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:35:54 crc kubenswrapper[4793]: I0127 21:35:54.804241 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:35:55 crc kubenswrapper[4793]: I0127 21:35:55.884068 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"295179a9ea0eedaf1310432bceb28bf62632efbbcd77a81e63814abf732bc0cf"} Jan 27 21:36:04 crc kubenswrapper[4793]: I0127 21:36:04.804409 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:36:04 crc kubenswrapper[4793]: E0127 21:36:04.805284 4793 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:36:15 crc kubenswrapper[4793]: I0127 21:36:15.811386 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:36:15 crc kubenswrapper[4793]: E0127 21:36:15.812263 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:36:29 crc kubenswrapper[4793]: I0127 21:36:29.805192 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:36:29 crc kubenswrapper[4793]: E0127 21:36:29.806755 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:36:43 crc kubenswrapper[4793]: I0127 21:36:43.803904 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:36:43 crc kubenswrapper[4793]: E0127 21:36:43.804826 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:36:56 crc kubenswrapper[4793]: I0127 21:36:56.803603 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:36:56 crc kubenswrapper[4793]: E0127 21:36:56.804519 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:37:08 crc kubenswrapper[4793]: I0127 21:37:08.804369 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:37:08 crc kubenswrapper[4793]: E0127 21:37:08.805798 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:37:19 crc kubenswrapper[4793]: I0127 21:37:19.804059 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:37:19 crc kubenswrapper[4793]: E0127 21:37:19.805174 4793 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:37:30 crc kubenswrapper[4793]: I0127 21:37:30.804002 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:37:30 crc kubenswrapper[4793]: E0127 21:37:30.804814 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:37:41 crc kubenswrapper[4793]: I0127 21:37:41.804197 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:37:41 crc kubenswrapper[4793]: E0127 21:37:41.804976 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:37:53 crc kubenswrapper[4793]: I0127 21:37:53.804245 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:37:53 crc kubenswrapper[4793]: E0127 21:37:53.805483 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:38:06 crc kubenswrapper[4793]: I0127 21:38:06.804159 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:38:06 crc kubenswrapper[4793]: E0127 21:38:06.807241 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:38:20 crc kubenswrapper[4793]: I0127 21:38:20.805005 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:38:20 crc kubenswrapper[4793]: E0127 21:38:20.806190 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:38:22 crc kubenswrapper[4793]: I0127 21:38:22.753155 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:38:22 crc kubenswrapper[4793]: I0127 21:38:22.753521 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.143918 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gq9g9"] Jan 27 21:38:31 crc kubenswrapper[4793]: E0127 21:38:31.145267 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9940139-05b8-4361-a60d-b9675e25de6d" containerName="extract-utilities" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.145284 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9940139-05b8-4361-a60d-b9675e25de6d" containerName="extract-utilities" Jan 27 21:38:31 crc kubenswrapper[4793]: E0127 21:38:31.145307 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9940139-05b8-4361-a60d-b9675e25de6d" containerName="registry-server" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.145315 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9940139-05b8-4361-a60d-b9675e25de6d" containerName="registry-server" Jan 27 21:38:31 crc kubenswrapper[4793]: E0127 21:38:31.145329 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e21de91-0807-4016-b5e5-a7b75ef4f2c1" containerName="extract-utilities" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.145339 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e21de91-0807-4016-b5e5-a7b75ef4f2c1" containerName="extract-utilities" Jan 27 21:38:31 crc kubenswrapper[4793]: E0127 21:38:31.145397 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e21de91-0807-4016-b5e5-a7b75ef4f2c1" containerName="registry-server" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.145405 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e21de91-0807-4016-b5e5-a7b75ef4f2c1" containerName="registry-server" Jan 27 21:38:31 crc kubenswrapper[4793]: E0127 21:38:31.145418 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9940139-05b8-4361-a60d-b9675e25de6d" containerName="extract-content" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.145426 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9940139-05b8-4361-a60d-b9675e25de6d" containerName="extract-content" Jan 27 21:38:31 crc kubenswrapper[4793]: E0127 21:38:31.145439 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e21de91-0807-4016-b5e5-a7b75ef4f2c1" containerName="extract-content" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.145446 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e21de91-0807-4016-b5e5-a7b75ef4f2c1" containerName="extract-content" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.145732 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9940139-05b8-4361-a60d-b9675e25de6d" containerName="registry-server" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.145759 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e21de91-0807-4016-b5e5-a7b75ef4f2c1" containerName="registry-server" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.150083 4793 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.163117 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gq9g9"] Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.207111 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9kv4r\" (UniqueName: \"kubernetes.io/projected/ab148f3e-4693-42f1-96ae-83ba83ed3618-kube-api-access-9kv4r\") pod \"certified-operators-gq9g9\" (UID: \"ab148f3e-4693-42f1-96ae-83ba83ed3618\") " pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.207338 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab148f3e-4693-42f1-96ae-83ba83ed3618-utilities\") pod \"certified-operators-gq9g9\" (UID: \"ab148f3e-4693-42f1-96ae-83ba83ed3618\") " pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.207863 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab148f3e-4693-42f1-96ae-83ba83ed3618-catalog-content\") pod \"certified-operators-gq9g9\" (UID: \"ab148f3e-4693-42f1-96ae-83ba83ed3618\") " pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.310296 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab148f3e-4693-42f1-96ae-83ba83ed3618-catalog-content\") pod \"certified-operators-gq9g9\" (UID: \"ab148f3e-4693-42f1-96ae-83ba83ed3618\") " pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.310378 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9kv4r\" (UniqueName: \"kubernetes.io/projected/ab148f3e-4693-42f1-96ae-83ba83ed3618-kube-api-access-9kv4r\") pod \"certified-operators-gq9g9\" (UID: \"ab148f3e-4693-42f1-96ae-83ba83ed3618\") " pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.310432 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab148f3e-4693-42f1-96ae-83ba83ed3618-utilities\") pod \"certified-operators-gq9g9\" (UID: \"ab148f3e-4693-42f1-96ae-83ba83ed3618\") " pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.310997 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab148f3e-4693-42f1-96ae-83ba83ed3618-catalog-content\") pod \"certified-operators-gq9g9\" (UID: \"ab148f3e-4693-42f1-96ae-83ba83ed3618\") " pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.311401 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab148f3e-4693-42f1-96ae-83ba83ed3618-utilities\") pod \"certified-operators-gq9g9\" (UID: \"ab148f3e-4693-42f1-96ae-83ba83ed3618\") " pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:31 crc kubenswrapper[4793]: I0127 21:38:31.930966 4793 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9kv4r\" (UniqueName: \"kubernetes.io/projected/ab148f3e-4693-42f1-96ae-83ba83ed3618-kube-api-access-9kv4r\") pod \"certified-operators-gq9g9\" (UID: \"ab148f3e-4693-42f1-96ae-83ba83ed3618\") " pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:32 crc kubenswrapper[4793]: I0127 21:38:32.093501 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:32 crc kubenswrapper[4793]: I0127 21:38:32.589942 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gq9g9"] Jan 27 21:38:33 crc kubenswrapper[4793]: I0127 21:38:33.116894 4793 generic.go:334] "Generic (PLEG): container finished" podID="ab148f3e-4693-42f1-96ae-83ba83ed3618" containerID="9a0c1a54861f656bb034563136a1083e09d13108495728821ae3fe331f7d06f3" exitCode=0 Jan 27 21:38:33 crc kubenswrapper[4793]: I0127 21:38:33.117063 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gq9g9" event={"ID":"ab148f3e-4693-42f1-96ae-83ba83ed3618","Type":"ContainerDied","Data":"9a0c1a54861f656bb034563136a1083e09d13108495728821ae3fe331f7d06f3"} Jan 27 21:38:33 crc kubenswrapper[4793]: I0127 21:38:33.117179 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gq9g9" event={"ID":"ab148f3e-4693-42f1-96ae-83ba83ed3618","Type":"ContainerStarted","Data":"78ff732619a55c4c348b8bea07365877679e016b63769534a42f7f87311e5c37"} Jan 27 21:38:35 crc kubenswrapper[4793]: I0127 21:38:35.136809 4793 generic.go:334] "Generic (PLEG): container finished" podID="ab148f3e-4693-42f1-96ae-83ba83ed3618" containerID="643b2766dbff6cbb3c514e194668dbd13ce3f433fa1c3c16ca547196e9984a77" exitCode=0 Jan 27 21:38:35 crc kubenswrapper[4793]: I0127 21:38:35.136952 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gq9g9" event={"ID":"ab148f3e-4693-42f1-96ae-83ba83ed3618","Type":"ContainerDied","Data":"643b2766dbff6cbb3c514e194668dbd13ce3f433fa1c3c16ca547196e9984a77"} Jan 27 21:38:35 crc kubenswrapper[4793]: I0127 21:38:35.809377 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:38:35 crc kubenswrapper[4793]: E0127 21:38:35.809831 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:38:36 crc kubenswrapper[4793]: I0127 21:38:36.168375 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gq9g9" event={"ID":"ab148f3e-4693-42f1-96ae-83ba83ed3618","Type":"ContainerStarted","Data":"5b63067d3ba14acc2d32178516335170cf5e9b1e9cc7fda037c7bacf50fb173f"} Jan 27 21:38:36 crc kubenswrapper[4793]: I0127 21:38:36.200837 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gq9g9" podStartSLOduration=2.758182912 podStartE2EDuration="5.200815013s" podCreationTimestamp="2026-01-27 21:38:31 +0000 UTC" firstStartedPulling="2026-01-27 21:38:33.11942104 +0000 UTC m=+5738.509674236" lastFinishedPulling="2026-01-27 21:38:35.562053181 +0000 UTC 
m=+5740.952306337" observedRunningTime="2026-01-27 21:38:36.193601249 +0000 UTC m=+5741.583854425" watchObservedRunningTime="2026-01-27 21:38:36.200815013 +0000 UTC m=+5741.591068179" Jan 27 21:38:42 crc kubenswrapper[4793]: I0127 21:38:42.094645 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:42 crc kubenswrapper[4793]: I0127 21:38:42.095242 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:42 crc kubenswrapper[4793]: I0127 21:38:42.162805 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:42 crc kubenswrapper[4793]: I0127 21:38:42.272484 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:42 crc kubenswrapper[4793]: I0127 21:38:42.418738 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gq9g9"] Jan 27 21:38:44 crc kubenswrapper[4793]: I0127 21:38:44.241038 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gq9g9" podUID="ab148f3e-4693-42f1-96ae-83ba83ed3618" containerName="registry-server" containerID="cri-o://5b63067d3ba14acc2d32178516335170cf5e9b1e9cc7fda037c7bacf50fb173f" gracePeriod=2 Jan 27 21:38:44 crc kubenswrapper[4793]: I0127 21:38:44.730126 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:44 crc kubenswrapper[4793]: I0127 21:38:44.822808 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab148f3e-4693-42f1-96ae-83ba83ed3618-utilities\") pod \"ab148f3e-4693-42f1-96ae-83ba83ed3618\" (UID: \"ab148f3e-4693-42f1-96ae-83ba83ed3618\") " Jan 27 21:38:44 crc kubenswrapper[4793]: I0127 21:38:44.822901 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab148f3e-4693-42f1-96ae-83ba83ed3618-catalog-content\") pod \"ab148f3e-4693-42f1-96ae-83ba83ed3618\" (UID: \"ab148f3e-4693-42f1-96ae-83ba83ed3618\") " Jan 27 21:38:44 crc kubenswrapper[4793]: I0127 21:38:44.823001 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9kv4r\" (UniqueName: \"kubernetes.io/projected/ab148f3e-4693-42f1-96ae-83ba83ed3618-kube-api-access-9kv4r\") pod \"ab148f3e-4693-42f1-96ae-83ba83ed3618\" (UID: \"ab148f3e-4693-42f1-96ae-83ba83ed3618\") " Jan 27 21:38:44 crc kubenswrapper[4793]: I0127 21:38:44.824030 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab148f3e-4693-42f1-96ae-83ba83ed3618-utilities" (OuterVolumeSpecName: "utilities") pod "ab148f3e-4693-42f1-96ae-83ba83ed3618" (UID: "ab148f3e-4693-42f1-96ae-83ba83ed3618"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:38:44 crc kubenswrapper[4793]: I0127 21:38:44.830140 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab148f3e-4693-42f1-96ae-83ba83ed3618-kube-api-access-9kv4r" (OuterVolumeSpecName: "kube-api-access-9kv4r") pod "ab148f3e-4693-42f1-96ae-83ba83ed3618" (UID: "ab148f3e-4693-42f1-96ae-83ba83ed3618"). 
InnerVolumeSpecName "kube-api-access-9kv4r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:38:44 crc kubenswrapper[4793]: I0127 21:38:44.886169 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab148f3e-4693-42f1-96ae-83ba83ed3618-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ab148f3e-4693-42f1-96ae-83ba83ed3618" (UID: "ab148f3e-4693-42f1-96ae-83ba83ed3618"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:38:44 crc kubenswrapper[4793]: I0127 21:38:44.925290 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab148f3e-4693-42f1-96ae-83ba83ed3618-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 21:38:44 crc kubenswrapper[4793]: I0127 21:38:44.925322 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9kv4r\" (UniqueName: \"kubernetes.io/projected/ab148f3e-4693-42f1-96ae-83ba83ed3618-kube-api-access-9kv4r\") on node \"crc\" DevicePath \"\"" Jan 27 21:38:44 crc kubenswrapper[4793]: I0127 21:38:44.925331 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab148f3e-4693-42f1-96ae-83ba83ed3618-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 21:38:45 crc kubenswrapper[4793]: I0127 21:38:45.251726 4793 generic.go:334] "Generic (PLEG): container finished" podID="ab148f3e-4693-42f1-96ae-83ba83ed3618" containerID="5b63067d3ba14acc2d32178516335170cf5e9b1e9cc7fda037c7bacf50fb173f" exitCode=0 Jan 27 21:38:45 crc kubenswrapper[4793]: I0127 21:38:45.251770 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gq9g9" event={"ID":"ab148f3e-4693-42f1-96ae-83ba83ed3618","Type":"ContainerDied","Data":"5b63067d3ba14acc2d32178516335170cf5e9b1e9cc7fda037c7bacf50fb173f"} Jan 27 21:38:45 crc kubenswrapper[4793]: I0127 21:38:45.251797 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gq9g9" event={"ID":"ab148f3e-4693-42f1-96ae-83ba83ed3618","Type":"ContainerDied","Data":"78ff732619a55c4c348b8bea07365877679e016b63769534a42f7f87311e5c37"} Jan 27 21:38:45 crc kubenswrapper[4793]: I0127 21:38:45.251814 4793 scope.go:117] "RemoveContainer" containerID="5b63067d3ba14acc2d32178516335170cf5e9b1e9cc7fda037c7bacf50fb173f" Jan 27 21:38:45 crc kubenswrapper[4793]: I0127 21:38:45.251848 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gq9g9" Jan 27 21:38:45 crc kubenswrapper[4793]: I0127 21:38:45.279373 4793 scope.go:117] "RemoveContainer" containerID="643b2766dbff6cbb3c514e194668dbd13ce3f433fa1c3c16ca547196e9984a77" Jan 27 21:38:45 crc kubenswrapper[4793]: I0127 21:38:45.295963 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gq9g9"] Jan 27 21:38:45 crc kubenswrapper[4793]: I0127 21:38:45.306098 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gq9g9"] Jan 27 21:38:45 crc kubenswrapper[4793]: I0127 21:38:45.324805 4793 scope.go:117] "RemoveContainer" containerID="9a0c1a54861f656bb034563136a1083e09d13108495728821ae3fe331f7d06f3" Jan 27 21:38:45 crc kubenswrapper[4793]: I0127 21:38:45.366000 4793 scope.go:117] "RemoveContainer" containerID="5b63067d3ba14acc2d32178516335170cf5e9b1e9cc7fda037c7bacf50fb173f" Jan 27 21:38:45 crc kubenswrapper[4793]: E0127 21:38:45.371764 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b63067d3ba14acc2d32178516335170cf5e9b1e9cc7fda037c7bacf50fb173f\": container with ID starting with 5b63067d3ba14acc2d32178516335170cf5e9b1e9cc7fda037c7bacf50fb173f not found: ID does not exist" containerID="5b63067d3ba14acc2d32178516335170cf5e9b1e9cc7fda037c7bacf50fb173f" Jan 27 21:38:45 crc kubenswrapper[4793]: I0127 21:38:45.371836 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b63067d3ba14acc2d32178516335170cf5e9b1e9cc7fda037c7bacf50fb173f"} err="failed to get container status \"5b63067d3ba14acc2d32178516335170cf5e9b1e9cc7fda037c7bacf50fb173f\": rpc error: code = NotFound desc = could not find container \"5b63067d3ba14acc2d32178516335170cf5e9b1e9cc7fda037c7bacf50fb173f\": container with ID starting with 5b63067d3ba14acc2d32178516335170cf5e9b1e9cc7fda037c7bacf50fb173f not found: ID does not exist" Jan 27 21:38:45 crc kubenswrapper[4793]: I0127 21:38:45.371874 4793 scope.go:117] "RemoveContainer" containerID="643b2766dbff6cbb3c514e194668dbd13ce3f433fa1c3c16ca547196e9984a77" Jan 27 21:38:45 crc kubenswrapper[4793]: E0127 21:38:45.374131 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"643b2766dbff6cbb3c514e194668dbd13ce3f433fa1c3c16ca547196e9984a77\": container with ID starting with 643b2766dbff6cbb3c514e194668dbd13ce3f433fa1c3c16ca547196e9984a77 not found: ID does not exist" containerID="643b2766dbff6cbb3c514e194668dbd13ce3f433fa1c3c16ca547196e9984a77" Jan 27 21:38:45 crc kubenswrapper[4793]: I0127 21:38:45.374164 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"643b2766dbff6cbb3c514e194668dbd13ce3f433fa1c3c16ca547196e9984a77"} err="failed to get container status \"643b2766dbff6cbb3c514e194668dbd13ce3f433fa1c3c16ca547196e9984a77\": rpc error: code = NotFound desc = could not find container \"643b2766dbff6cbb3c514e194668dbd13ce3f433fa1c3c16ca547196e9984a77\": container with ID starting with 643b2766dbff6cbb3c514e194668dbd13ce3f433fa1c3c16ca547196e9984a77 not found: ID does not exist" Jan 27 21:38:45 crc kubenswrapper[4793]: I0127 21:38:45.374183 4793 scope.go:117] "RemoveContainer" containerID="9a0c1a54861f656bb034563136a1083e09d13108495728821ae3fe331f7d06f3" Jan 27 21:38:45 crc kubenswrapper[4793]: E0127 21:38:45.374893 4793 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"9a0c1a54861f656bb034563136a1083e09d13108495728821ae3fe331f7d06f3\": container with ID starting with 9a0c1a54861f656bb034563136a1083e09d13108495728821ae3fe331f7d06f3 not found: ID does not exist" containerID="9a0c1a54861f656bb034563136a1083e09d13108495728821ae3fe331f7d06f3" Jan 27 21:38:45 crc kubenswrapper[4793]: I0127 21:38:45.375032 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a0c1a54861f656bb034563136a1083e09d13108495728821ae3fe331f7d06f3"} err="failed to get container status \"9a0c1a54861f656bb034563136a1083e09d13108495728821ae3fe331f7d06f3\": rpc error: code = NotFound desc = could not find container \"9a0c1a54861f656bb034563136a1083e09d13108495728821ae3fe331f7d06f3\": container with ID starting with 9a0c1a54861f656bb034563136a1083e09d13108495728821ae3fe331f7d06f3 not found: ID does not exist" Jan 27 21:38:45 crc kubenswrapper[4793]: I0127 21:38:45.816200 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab148f3e-4693-42f1-96ae-83ba83ed3618" path="/var/lib/kubelet/pods/ab148f3e-4693-42f1-96ae-83ba83ed3618/volumes" Jan 27 21:38:48 crc kubenswrapper[4793]: I0127 21:38:48.803478 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:38:48 crc kubenswrapper[4793]: E0127 21:38:48.804124 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:38:52 crc kubenswrapper[4793]: I0127 21:38:52.753910 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:38:52 crc kubenswrapper[4793]: I0127 21:38:52.754479 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:39:01 crc kubenswrapper[4793]: I0127 21:39:01.804336 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:39:01 crc kubenswrapper[4793]: E0127 21:39:01.805682 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:39:12 crc kubenswrapper[4793]: I0127 21:39:12.803804 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:39:12 crc kubenswrapper[4793]: E0127 21:39:12.804625 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:39:22 crc kubenswrapper[4793]: I0127 21:39:22.753957 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:39:22 crc kubenswrapper[4793]: I0127 21:39:22.754575 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:39:22 crc kubenswrapper[4793]: I0127 21:39:22.754638 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 21:39:22 crc kubenswrapper[4793]: I0127 21:39:22.755608 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"295179a9ea0eedaf1310432bceb28bf62632efbbcd77a81e63814abf732bc0cf"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 21:39:22 crc kubenswrapper[4793]: I0127 21:39:22.755679 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://295179a9ea0eedaf1310432bceb28bf62632efbbcd77a81e63814abf732bc0cf" gracePeriod=600 Jan 27 21:39:23 crc kubenswrapper[4793]: I0127 21:39:23.688620 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="295179a9ea0eedaf1310432bceb28bf62632efbbcd77a81e63814abf732bc0cf" exitCode=0 Jan 27 21:39:23 crc kubenswrapper[4793]: I0127 21:39:23.689385 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"295179a9ea0eedaf1310432bceb28bf62632efbbcd77a81e63814abf732bc0cf"} Jan 27 21:39:23 crc kubenswrapper[4793]: I0127 21:39:23.689449 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0"} Jan 27 21:39:23 crc kubenswrapper[4793]: I0127 21:39:23.689468 4793 scope.go:117] "RemoveContainer" containerID="8a572ffcbf1e0e0fcf414a9e14765d6ed73d760e068db692e7d331df239ff569" Jan 27 21:39:26 crc kubenswrapper[4793]: I0127 21:39:26.803356 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:39:26 crc kubenswrapper[4793]: E0127 21:39:26.804306 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier 
pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:39:40 crc kubenswrapper[4793]: I0127 21:39:40.803758 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:39:40 crc kubenswrapper[4793]: E0127 21:39:40.805140 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:39:53 crc kubenswrapper[4793]: I0127 21:39:53.803260 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:39:54 crc kubenswrapper[4793]: I0127 21:39:54.082208 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0"} Jan 27 21:39:57 crc kubenswrapper[4793]: I0127 21:39:57.125855 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" exitCode=1 Jan 27 21:39:57 crc kubenswrapper[4793]: I0127 21:39:57.125919 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0"} Jan 27 21:39:57 crc kubenswrapper[4793]: I0127 21:39:57.126362 4793 scope.go:117] "RemoveContainer" containerID="c8f4027f0021d30d0fd7d32a33a6a2df35e1b585a37b8ddafca5c2e93e9f00d3" Jan 27 21:39:57 crc kubenswrapper[4793]: I0127 21:39:57.127679 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:39:57 crc kubenswrapper[4793]: E0127 21:39:57.128192 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:39:58 crc kubenswrapper[4793]: I0127 21:39:58.242628 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:39:58 crc kubenswrapper[4793]: I0127 21:39:58.243030 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:39:58 crc kubenswrapper[4793]: I0127 21:39:58.243050 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 21:39:58 crc kubenswrapper[4793]: I0127 21:39:58.243062 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:39:58 crc kubenswrapper[4793]: I0127 21:39:58.243933 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:39:58 crc kubenswrapper[4793]: E0127 21:39:58.244250 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:40:12 crc kubenswrapper[4793]: I0127 21:40:12.804826 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:40:12 crc kubenswrapper[4793]: E0127 21:40:12.805668 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:40:26 crc kubenswrapper[4793]: I0127 21:40:26.803680 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:40:26 crc kubenswrapper[4793]: E0127 21:40:26.804604 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:40:37 crc kubenswrapper[4793]: I0127 21:40:37.212781 4793 scope.go:117] "RemoveContainer" containerID="bbb05aa4dd4e7f932b3ff340d8196b53123c69f4a8c2c854e359f81ad2d377cc" Jan 27 21:40:37 crc kubenswrapper[4793]: I0127 21:40:37.241587 4793 scope.go:117] "RemoveContainer" containerID="ecafb7f701a4bb49a939b9197816670423f0c423dfd1f652625b23a56a5c48f3" Jan 27 21:40:37 crc kubenswrapper[4793]: I0127 21:40:37.261736 4793 scope.go:117] "RemoveContainer" containerID="2b438e9cb9e64ec1b6ca78f830b1a2b7656859b86e92b45903cf5aa0377640e7" Jan 27 21:40:40 crc kubenswrapper[4793]: I0127 21:40:40.804301 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:40:40 crc kubenswrapper[4793]: E0127 21:40:40.805376 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:40:51 crc kubenswrapper[4793]: I0127 21:40:51.803073 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:40:51 crc kubenswrapper[4793]: E0127 21:40:51.803906 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:41:06 crc kubenswrapper[4793]: I0127 21:41:06.803104 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:41:06 crc kubenswrapper[4793]: E0127 21:41:06.803884 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:41:10 crc kubenswrapper[4793]: I0127 21:41:10.960933 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hc5fn"] Jan 27 21:41:10 crc kubenswrapper[4793]: E0127 21:41:10.961948 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab148f3e-4693-42f1-96ae-83ba83ed3618" containerName="extract-content" Jan 27 21:41:10 crc kubenswrapper[4793]: I0127 21:41:10.961965 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab148f3e-4693-42f1-96ae-83ba83ed3618" containerName="extract-content" Jan 27 21:41:10 crc kubenswrapper[4793]: E0127 21:41:10.961988 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab148f3e-4693-42f1-96ae-83ba83ed3618" containerName="extract-utilities" Jan 27 21:41:10 crc kubenswrapper[4793]: I0127 21:41:10.961998 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab148f3e-4693-42f1-96ae-83ba83ed3618" containerName="extract-utilities" Jan 27 21:41:10 crc kubenswrapper[4793]: E0127 21:41:10.962040 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab148f3e-4693-42f1-96ae-83ba83ed3618" containerName="registry-server" Jan 27 21:41:10 crc kubenswrapper[4793]: I0127 21:41:10.962050 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab148f3e-4693-42f1-96ae-83ba83ed3618" containerName="registry-server" Jan 27 21:41:10 crc kubenswrapper[4793]: I0127 21:41:10.962258 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab148f3e-4693-42f1-96ae-83ba83ed3618" containerName="registry-server" Jan 27 21:41:10 crc kubenswrapper[4793]: I0127 21:41:10.963962 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:10 crc kubenswrapper[4793]: I0127 21:41:10.972151 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hc5fn"] Jan 27 21:41:11 crc kubenswrapper[4793]: I0127 21:41:11.158795 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9fc20eb-cc42-4025-a20e-e37aee18587c-catalog-content\") pod \"community-operators-hc5fn\" (UID: \"f9fc20eb-cc42-4025-a20e-e37aee18587c\") " pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:11 crc kubenswrapper[4793]: I0127 21:41:11.159462 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9fc20eb-cc42-4025-a20e-e37aee18587c-utilities\") pod \"community-operators-hc5fn\" (UID: \"f9fc20eb-cc42-4025-a20e-e37aee18587c\") " pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:11 crc kubenswrapper[4793]: I0127 21:41:11.159512 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pltkh\" (UniqueName: \"kubernetes.io/projected/f9fc20eb-cc42-4025-a20e-e37aee18587c-kube-api-access-pltkh\") pod \"community-operators-hc5fn\" (UID: \"f9fc20eb-cc42-4025-a20e-e37aee18587c\") " pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:11 crc kubenswrapper[4793]: I0127 21:41:11.261990 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9fc20eb-cc42-4025-a20e-e37aee18587c-catalog-content\") pod \"community-operators-hc5fn\" (UID: \"f9fc20eb-cc42-4025-a20e-e37aee18587c\") " pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:11 crc kubenswrapper[4793]: I0127 21:41:11.262578 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9fc20eb-cc42-4025-a20e-e37aee18587c-utilities\") pod \"community-operators-hc5fn\" (UID: \"f9fc20eb-cc42-4025-a20e-e37aee18587c\") " pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:11 crc kubenswrapper[4793]: I0127 21:41:11.262726 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pltkh\" (UniqueName: \"kubernetes.io/projected/f9fc20eb-cc42-4025-a20e-e37aee18587c-kube-api-access-pltkh\") pod \"community-operators-hc5fn\" (UID: \"f9fc20eb-cc42-4025-a20e-e37aee18587c\") " pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:11 crc kubenswrapper[4793]: I0127 21:41:11.262938 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9fc20eb-cc42-4025-a20e-e37aee18587c-utilities\") pod \"community-operators-hc5fn\" (UID: \"f9fc20eb-cc42-4025-a20e-e37aee18587c\") " pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:11 crc kubenswrapper[4793]: I0127 21:41:11.262648 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9fc20eb-cc42-4025-a20e-e37aee18587c-catalog-content\") pod \"community-operators-hc5fn\" (UID: \"f9fc20eb-cc42-4025-a20e-e37aee18587c\") " pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:11 crc kubenswrapper[4793]: I0127 21:41:11.286749 4793 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-pltkh\" (UniqueName: \"kubernetes.io/projected/f9fc20eb-cc42-4025-a20e-e37aee18587c-kube-api-access-pltkh\") pod \"community-operators-hc5fn\" (UID: \"f9fc20eb-cc42-4025-a20e-e37aee18587c\") " pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:11 crc kubenswrapper[4793]: I0127 21:41:11.304725 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:11 crc kubenswrapper[4793]: I0127 21:41:11.987816 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hc5fn"] Jan 27 21:41:12 crc kubenswrapper[4793]: I0127 21:41:12.045379 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hc5fn" event={"ID":"f9fc20eb-cc42-4025-a20e-e37aee18587c","Type":"ContainerStarted","Data":"7e497548943439a129e0f0e45734de78343452580c9b278565b8db37c9149fea"} Jan 27 21:41:13 crc kubenswrapper[4793]: I0127 21:41:13.058984 4793 generic.go:334] "Generic (PLEG): container finished" podID="f9fc20eb-cc42-4025-a20e-e37aee18587c" containerID="688b1fbe33651559409656bdd5c65a1bdedd96699d9a18c02d6815089f1c62a1" exitCode=0 Jan 27 21:41:13 crc kubenswrapper[4793]: I0127 21:41:13.059084 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hc5fn" event={"ID":"f9fc20eb-cc42-4025-a20e-e37aee18587c","Type":"ContainerDied","Data":"688b1fbe33651559409656bdd5c65a1bdedd96699d9a18c02d6815089f1c62a1"} Jan 27 21:41:13 crc kubenswrapper[4793]: I0127 21:41:13.062370 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 21:41:14 crc kubenswrapper[4793]: I0127 21:41:14.076985 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hc5fn" event={"ID":"f9fc20eb-cc42-4025-a20e-e37aee18587c","Type":"ContainerStarted","Data":"115d71d20277acc3528f445ebfcd230ffd90831369f4af1722fa2ed9565aa08f"} Jan 27 21:41:15 crc kubenswrapper[4793]: I0127 21:41:15.095048 4793 generic.go:334] "Generic (PLEG): container finished" podID="f9fc20eb-cc42-4025-a20e-e37aee18587c" containerID="115d71d20277acc3528f445ebfcd230ffd90831369f4af1722fa2ed9565aa08f" exitCode=0 Jan 27 21:41:15 crc kubenswrapper[4793]: I0127 21:41:15.095120 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hc5fn" event={"ID":"f9fc20eb-cc42-4025-a20e-e37aee18587c","Type":"ContainerDied","Data":"115d71d20277acc3528f445ebfcd230ffd90831369f4af1722fa2ed9565aa08f"} Jan 27 21:41:16 crc kubenswrapper[4793]: I0127 21:41:16.108324 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hc5fn" event={"ID":"f9fc20eb-cc42-4025-a20e-e37aee18587c","Type":"ContainerStarted","Data":"a39bc19b12a6792a08f316f218afaac17a6f7da2448f237f9cada6b4c9440c6e"} Jan 27 21:41:16 crc kubenswrapper[4793]: I0127 21:41:16.138320 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hc5fn" podStartSLOduration=3.434746361 podStartE2EDuration="6.138299372s" podCreationTimestamp="2026-01-27 21:41:10 +0000 UTC" firstStartedPulling="2026-01-27 21:41:13.062110234 +0000 UTC m=+5898.452363390" lastFinishedPulling="2026-01-27 21:41:15.765663245 +0000 UTC m=+5901.155916401" observedRunningTime="2026-01-27 21:41:16.127846061 +0000 UTC m=+5901.518099227" watchObservedRunningTime="2026-01-27 
21:41:16.138299372 +0000 UTC m=+5901.528552528" Jan 27 21:41:19 crc kubenswrapper[4793]: I0127 21:41:19.804060 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:41:19 crc kubenswrapper[4793]: E0127 21:41:19.804951 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:41:21 crc kubenswrapper[4793]: I0127 21:41:21.305948 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:21 crc kubenswrapper[4793]: I0127 21:41:21.306789 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:21 crc kubenswrapper[4793]: I0127 21:41:21.359746 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:21 crc kubenswrapper[4793]: I0127 21:41:21.418299 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:21 crc kubenswrapper[4793]: I0127 21:41:21.615631 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hc5fn"] Jan 27 21:41:23 crc kubenswrapper[4793]: I0127 21:41:23.387155 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hc5fn" podUID="f9fc20eb-cc42-4025-a20e-e37aee18587c" containerName="registry-server" containerID="cri-o://a39bc19b12a6792a08f316f218afaac17a6f7da2448f237f9cada6b4c9440c6e" gracePeriod=2 Jan 27 21:41:23 crc kubenswrapper[4793]: I0127 21:41:23.848650 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:23 crc kubenswrapper[4793]: I0127 21:41:23.961474 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pltkh\" (UniqueName: \"kubernetes.io/projected/f9fc20eb-cc42-4025-a20e-e37aee18587c-kube-api-access-pltkh\") pod \"f9fc20eb-cc42-4025-a20e-e37aee18587c\" (UID: \"f9fc20eb-cc42-4025-a20e-e37aee18587c\") " Jan 27 21:41:23 crc kubenswrapper[4793]: I0127 21:41:23.961695 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9fc20eb-cc42-4025-a20e-e37aee18587c-catalog-content\") pod \"f9fc20eb-cc42-4025-a20e-e37aee18587c\" (UID: \"f9fc20eb-cc42-4025-a20e-e37aee18587c\") " Jan 27 21:41:23 crc kubenswrapper[4793]: I0127 21:41:23.961738 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9fc20eb-cc42-4025-a20e-e37aee18587c-utilities\") pod \"f9fc20eb-cc42-4025-a20e-e37aee18587c\" (UID: \"f9fc20eb-cc42-4025-a20e-e37aee18587c\") " Jan 27 21:41:23 crc kubenswrapper[4793]: I0127 21:41:23.964079 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9fc20eb-cc42-4025-a20e-e37aee18587c-utilities" (OuterVolumeSpecName: "utilities") pod "f9fc20eb-cc42-4025-a20e-e37aee18587c" (UID: "f9fc20eb-cc42-4025-a20e-e37aee18587c"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:41:23 crc kubenswrapper[4793]: I0127 21:41:23.974786 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9fc20eb-cc42-4025-a20e-e37aee18587c-kube-api-access-pltkh" (OuterVolumeSpecName: "kube-api-access-pltkh") pod "f9fc20eb-cc42-4025-a20e-e37aee18587c" (UID: "f9fc20eb-cc42-4025-a20e-e37aee18587c"). InnerVolumeSpecName "kube-api-access-pltkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.061714 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9fc20eb-cc42-4025-a20e-e37aee18587c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f9fc20eb-cc42-4025-a20e-e37aee18587c" (UID: "f9fc20eb-cc42-4025-a20e-e37aee18587c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.064529 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pltkh\" (UniqueName: \"kubernetes.io/projected/f9fc20eb-cc42-4025-a20e-e37aee18587c-kube-api-access-pltkh\") on node \"crc\" DevicePath \"\"" Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.064584 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9fc20eb-cc42-4025-a20e-e37aee18587c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.064595 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9fc20eb-cc42-4025-a20e-e37aee18587c-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.403397 4793 generic.go:334] "Generic (PLEG): container finished" podID="f9fc20eb-cc42-4025-a20e-e37aee18587c" containerID="a39bc19b12a6792a08f316f218afaac17a6f7da2448f237f9cada6b4c9440c6e" exitCode=0 Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.403450 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hc5fn" event={"ID":"f9fc20eb-cc42-4025-a20e-e37aee18587c","Type":"ContainerDied","Data":"a39bc19b12a6792a08f316f218afaac17a6f7da2448f237f9cada6b4c9440c6e"} Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.403479 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hc5fn" event={"ID":"f9fc20eb-cc42-4025-a20e-e37aee18587c","Type":"ContainerDied","Data":"7e497548943439a129e0f0e45734de78343452580c9b278565b8db37c9149fea"} Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.403497 4793 scope.go:117] "RemoveContainer" containerID="a39bc19b12a6792a08f316f218afaac17a6f7da2448f237f9cada6b4c9440c6e" Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.403491 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hc5fn" Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.525011 4793 scope.go:117] "RemoveContainer" containerID="115d71d20277acc3528f445ebfcd230ffd90831369f4af1722fa2ed9565aa08f" Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.554731 4793 scope.go:117] "RemoveContainer" containerID="688b1fbe33651559409656bdd5c65a1bdedd96699d9a18c02d6815089f1c62a1" Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.577215 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hc5fn"] Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.587362 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hc5fn"] Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.615508 4793 scope.go:117] "RemoveContainer" containerID="a39bc19b12a6792a08f316f218afaac17a6f7da2448f237f9cada6b4c9440c6e" Jan 27 21:41:24 crc kubenswrapper[4793]: E0127 21:41:24.616272 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a39bc19b12a6792a08f316f218afaac17a6f7da2448f237f9cada6b4c9440c6e\": container with ID starting with a39bc19b12a6792a08f316f218afaac17a6f7da2448f237f9cada6b4c9440c6e not found: ID does not exist" containerID="a39bc19b12a6792a08f316f218afaac17a6f7da2448f237f9cada6b4c9440c6e" Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.616313 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a39bc19b12a6792a08f316f218afaac17a6f7da2448f237f9cada6b4c9440c6e"} err="failed to get container status \"a39bc19b12a6792a08f316f218afaac17a6f7da2448f237f9cada6b4c9440c6e\": rpc error: code = NotFound desc = could not find container \"a39bc19b12a6792a08f316f218afaac17a6f7da2448f237f9cada6b4c9440c6e\": container with ID starting with a39bc19b12a6792a08f316f218afaac17a6f7da2448f237f9cada6b4c9440c6e not found: ID does not exist" Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.616344 4793 scope.go:117] "RemoveContainer" containerID="115d71d20277acc3528f445ebfcd230ffd90831369f4af1722fa2ed9565aa08f" Jan 27 21:41:24 crc kubenswrapper[4793]: E0127 21:41:24.616630 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"115d71d20277acc3528f445ebfcd230ffd90831369f4af1722fa2ed9565aa08f\": container with ID starting with 115d71d20277acc3528f445ebfcd230ffd90831369f4af1722fa2ed9565aa08f not found: ID does not exist" containerID="115d71d20277acc3528f445ebfcd230ffd90831369f4af1722fa2ed9565aa08f" Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.616655 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"115d71d20277acc3528f445ebfcd230ffd90831369f4af1722fa2ed9565aa08f"} err="failed to get container status \"115d71d20277acc3528f445ebfcd230ffd90831369f4af1722fa2ed9565aa08f\": rpc error: code = NotFound desc = could not find container \"115d71d20277acc3528f445ebfcd230ffd90831369f4af1722fa2ed9565aa08f\": container with ID starting with 115d71d20277acc3528f445ebfcd230ffd90831369f4af1722fa2ed9565aa08f not found: ID does not exist" Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.616670 4793 scope.go:117] "RemoveContainer" containerID="688b1fbe33651559409656bdd5c65a1bdedd96699d9a18c02d6815089f1c62a1" Jan 27 21:41:24 crc kubenswrapper[4793]: E0127 21:41:24.617204 4793 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"688b1fbe33651559409656bdd5c65a1bdedd96699d9a18c02d6815089f1c62a1\": container with ID starting with 688b1fbe33651559409656bdd5c65a1bdedd96699d9a18c02d6815089f1c62a1 not found: ID does not exist" containerID="688b1fbe33651559409656bdd5c65a1bdedd96699d9a18c02d6815089f1c62a1" Jan 27 21:41:24 crc kubenswrapper[4793]: I0127 21:41:24.617232 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"688b1fbe33651559409656bdd5c65a1bdedd96699d9a18c02d6815089f1c62a1"} err="failed to get container status \"688b1fbe33651559409656bdd5c65a1bdedd96699d9a18c02d6815089f1c62a1\": rpc error: code = NotFound desc = could not find container \"688b1fbe33651559409656bdd5c65a1bdedd96699d9a18c02d6815089f1c62a1\": container with ID starting with 688b1fbe33651559409656bdd5c65a1bdedd96699d9a18c02d6815089f1c62a1 not found: ID does not exist" Jan 27 21:41:25 crc kubenswrapper[4793]: I0127 21:41:25.815226 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9fc20eb-cc42-4025-a20e-e37aee18587c" path="/var/lib/kubelet/pods/f9fc20eb-cc42-4025-a20e-e37aee18587c/volumes" Jan 27 21:41:32 crc kubenswrapper[4793]: I0127 21:41:32.804579 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:41:32 crc kubenswrapper[4793]: E0127 21:41:32.805468 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:41:46 crc kubenswrapper[4793]: I0127 21:41:46.803760 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:41:46 crc kubenswrapper[4793]: E0127 21:41:46.804741 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:41:52 crc kubenswrapper[4793]: I0127 21:41:52.754188 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:41:52 crc kubenswrapper[4793]: I0127 21:41:52.756156 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:41:57 crc kubenswrapper[4793]: I0127 21:41:57.804276 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:41:57 crc kubenswrapper[4793]: E0127 21:41:57.805060 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:42:08 crc kubenswrapper[4793]: I0127 21:42:08.804584 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:42:08 crc kubenswrapper[4793]: E0127 21:42:08.805457 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:42:21 crc kubenswrapper[4793]: I0127 21:42:21.804042 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:42:21 crc kubenswrapper[4793]: E0127 21:42:21.805206 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:42:22 crc kubenswrapper[4793]: I0127 21:42:22.767228 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:42:22 crc kubenswrapper[4793]: I0127 21:42:22.767634 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:42:36 crc kubenswrapper[4793]: I0127 21:42:36.804336 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:42:36 crc kubenswrapper[4793]: E0127 21:42:36.805133 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:42:50 crc kubenswrapper[4793]: I0127 21:42:50.804749 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:42:50 crc kubenswrapper[4793]: E0127 21:42:50.805706 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:42:52 crc kubenswrapper[4793]: I0127 21:42:52.753194 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:42:52 crc kubenswrapper[4793]: I0127 21:42:52.753815 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:42:52 crc kubenswrapper[4793]: I0127 21:42:52.753871 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 21:42:52 crc kubenswrapper[4793]: I0127 21:42:52.754550 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 21:42:52 crc kubenswrapper[4793]: I0127 21:42:52.754697 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" gracePeriod=600 Jan 27 21:42:52 crc kubenswrapper[4793]: E0127 21:42:52.879201 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:42:53 crc kubenswrapper[4793]: I0127 21:42:53.619209 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" exitCode=0 Jan 27 21:42:53 crc kubenswrapper[4793]: I0127 21:42:53.619283 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0"} Jan 27 21:42:53 crc kubenswrapper[4793]: I0127 21:42:53.619499 4793 scope.go:117] "RemoveContainer" containerID="295179a9ea0eedaf1310432bceb28bf62632efbbcd77a81e63814abf732bc0cf" Jan 27 21:42:53 crc kubenswrapper[4793]: I0127 21:42:53.620225 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:42:53 crc kubenswrapper[4793]: E0127 21:42:53.620683 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:43:01 crc kubenswrapper[4793]: I0127 21:43:01.804618 4793 scope.go:117] 
"RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:43:01 crc kubenswrapper[4793]: E0127 21:43:01.806160 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:43:08 crc kubenswrapper[4793]: I0127 21:43:08.844742 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:43:08 crc kubenswrapper[4793]: E0127 21:43:08.848747 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:43:15 crc kubenswrapper[4793]: I0127 21:43:15.816695 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:43:15 crc kubenswrapper[4793]: E0127 21:43:15.817983 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:43:23 crc kubenswrapper[4793]: I0127 21:43:23.804274 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:43:23 crc kubenswrapper[4793]: E0127 21:43:23.805447 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:43:29 crc kubenswrapper[4793]: I0127 21:43:29.804247 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:43:29 crc kubenswrapper[4793]: E0127 21:43:29.805339 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:43:37 crc kubenswrapper[4793]: I0127 21:43:37.803619 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:43:37 crc kubenswrapper[4793]: E0127 21:43:37.804722 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:43:41 crc kubenswrapper[4793]: I0127 21:43:41.806903 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:43:41 crc kubenswrapper[4793]: E0127 21:43:41.808013 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:43:49 crc kubenswrapper[4793]: I0127 21:43:49.803504 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:43:49 crc kubenswrapper[4793]: E0127 21:43:49.804497 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:43:55 crc kubenswrapper[4793]: I0127 21:43:55.819779 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:43:55 crc kubenswrapper[4793]: E0127 21:43:55.820882 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:44:03 crc kubenswrapper[4793]: I0127 21:44:03.803857 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:44:03 crc kubenswrapper[4793]: E0127 21:44:03.805022 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:44:08 crc kubenswrapper[4793]: I0127 21:44:08.803993 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:44:08 crc kubenswrapper[4793]: E0127 21:44:08.804798 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:44:16 crc kubenswrapper[4793]: I0127 21:44:16.803953 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:44:16 crc 
kubenswrapper[4793]: E0127 21:44:16.804677 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:44:20 crc kubenswrapper[4793]: I0127 21:44:20.805008 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:44:20 crc kubenswrapper[4793]: E0127 21:44:20.806083 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:44:28 crc kubenswrapper[4793]: I0127 21:44:28.804191 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:44:28 crc kubenswrapper[4793]: E0127 21:44:28.804942 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:44:34 crc kubenswrapper[4793]: I0127 21:44:34.803876 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:44:34 crc kubenswrapper[4793]: E0127 21:44:34.804430 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:44:43 crc kubenswrapper[4793]: I0127 21:44:43.803541 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:44:43 crc kubenswrapper[4793]: E0127 21:44:43.804572 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:44:48 crc kubenswrapper[4793]: I0127 21:44:48.804189 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:44:48 crc kubenswrapper[4793]: E0127 21:44:48.804960 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" 
podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:44:57 crc kubenswrapper[4793]: I0127 21:44:57.803315 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:44:57 crc kubenswrapper[4793]: E0127 21:44:57.804283 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:44:59 crc kubenswrapper[4793]: I0127 21:44:59.803394 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.178906 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr"] Jan 27 21:45:00 crc kubenswrapper[4793]: E0127 21:45:00.179834 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9fc20eb-cc42-4025-a20e-e37aee18587c" containerName="registry-server" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.179858 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9fc20eb-cc42-4025-a20e-e37aee18587c" containerName="registry-server" Jan 27 21:45:00 crc kubenswrapper[4793]: E0127 21:45:00.179884 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9fc20eb-cc42-4025-a20e-e37aee18587c" containerName="extract-content" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.179893 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9fc20eb-cc42-4025-a20e-e37aee18587c" containerName="extract-content" Jan 27 21:45:00 crc kubenswrapper[4793]: E0127 21:45:00.179927 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9fc20eb-cc42-4025-a20e-e37aee18587c" containerName="extract-utilities" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.179937 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9fc20eb-cc42-4025-a20e-e37aee18587c" containerName="extract-utilities" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.180223 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9fc20eb-cc42-4025-a20e-e37aee18587c" containerName="registry-server" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.181204 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.183450 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.183858 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.188827 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr"] Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.343704 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrvh5\" (UniqueName: \"kubernetes.io/projected/f539b49d-e419-4610-bfd9-d622db4abf43-kube-api-access-nrvh5\") pod \"collect-profiles-29492505-vcqkr\" (UID: \"f539b49d-e419-4610-bfd9-d622db4abf43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.343869 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f539b49d-e419-4610-bfd9-d622db4abf43-config-volume\") pod \"collect-profiles-29492505-vcqkr\" (UID: \"f539b49d-e419-4610-bfd9-d622db4abf43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.343976 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f539b49d-e419-4610-bfd9-d622db4abf43-secret-volume\") pod \"collect-profiles-29492505-vcqkr\" (UID: \"f539b49d-e419-4610-bfd9-d622db4abf43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.417093 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d"} Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.445587 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f539b49d-e419-4610-bfd9-d622db4abf43-config-volume\") pod \"collect-profiles-29492505-vcqkr\" (UID: \"f539b49d-e419-4610-bfd9-d622db4abf43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.445733 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f539b49d-e419-4610-bfd9-d622db4abf43-secret-volume\") pod \"collect-profiles-29492505-vcqkr\" (UID: \"f539b49d-e419-4610-bfd9-d622db4abf43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.445895 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrvh5\" (UniqueName: \"kubernetes.io/projected/f539b49d-e419-4610-bfd9-d622db4abf43-kube-api-access-nrvh5\") pod \"collect-profiles-29492505-vcqkr\" (UID: \"f539b49d-e419-4610-bfd9-d622db4abf43\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.446979 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f539b49d-e419-4610-bfd9-d622db4abf43-config-volume\") pod \"collect-profiles-29492505-vcqkr\" (UID: \"f539b49d-e419-4610-bfd9-d622db4abf43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.453580 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f539b49d-e419-4610-bfd9-d622db4abf43-secret-volume\") pod \"collect-profiles-29492505-vcqkr\" (UID: \"f539b49d-e419-4610-bfd9-d622db4abf43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.463460 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrvh5\" (UniqueName: \"kubernetes.io/projected/f539b49d-e419-4610-bfd9-d622db4abf43-kube-api-access-nrvh5\") pod \"collect-profiles-29492505-vcqkr\" (UID: \"f539b49d-e419-4610-bfd9-d622db4abf43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" Jan 27 21:45:00 crc kubenswrapper[4793]: I0127 21:45:00.511961 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" Jan 27 21:45:01 crc kubenswrapper[4793]: W0127 21:45:01.068193 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf539b49d_e419_4610_bfd9_d622db4abf43.slice/crio-63e605c629e3b20d79345b0c377d32d091b8e68707a6f4620e8028c67e2bfb51 WatchSource:0}: Error finding container 63e605c629e3b20d79345b0c377d32d091b8e68707a6f4620e8028c67e2bfb51: Status 404 returned error can't find the container with id 63e605c629e3b20d79345b0c377d32d091b8e68707a6f4620e8028c67e2bfb51 Jan 27 21:45:01 crc kubenswrapper[4793]: I0127 21:45:01.068672 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr"] Jan 27 21:45:01 crc kubenswrapper[4793]: I0127 21:45:01.429409 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" event={"ID":"f539b49d-e419-4610-bfd9-d622db4abf43","Type":"ContainerStarted","Data":"8f501427ee7cec7f3124e941a432211fc69546dd9d8c59befada6cff62bd4225"} Jan 27 21:45:01 crc kubenswrapper[4793]: I0127 21:45:01.429452 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" event={"ID":"f539b49d-e419-4610-bfd9-d622db4abf43","Type":"ContainerStarted","Data":"63e605c629e3b20d79345b0c377d32d091b8e68707a6f4620e8028c67e2bfb51"} Jan 27 21:45:01 crc kubenswrapper[4793]: I0127 21:45:01.459949 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" podStartSLOduration=1.4599288160000001 podStartE2EDuration="1.459928816s" podCreationTimestamp="2026-01-27 21:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 21:45:01.449891445 +0000 UTC m=+6126.840144601" watchObservedRunningTime="2026-01-27 21:45:01.459928816 +0000 UTC 
m=+6126.850181972" Jan 27 21:45:02 crc kubenswrapper[4793]: I0127 21:45:02.444518 4793 generic.go:334] "Generic (PLEG): container finished" podID="f539b49d-e419-4610-bfd9-d622db4abf43" containerID="8f501427ee7cec7f3124e941a432211fc69546dd9d8c59befada6cff62bd4225" exitCode=0 Jan 27 21:45:02 crc kubenswrapper[4793]: I0127 21:45:02.444615 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" event={"ID":"f539b49d-e419-4610-bfd9-d622db4abf43","Type":"ContainerDied","Data":"8f501427ee7cec7f3124e941a432211fc69546dd9d8c59befada6cff62bd4225"} Jan 27 21:45:03 crc kubenswrapper[4793]: I0127 21:45:03.243486 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 21:45:03 crc kubenswrapper[4793]: I0127 21:45:03.455461 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" exitCode=1 Jan 27 21:45:03 crc kubenswrapper[4793]: I0127 21:45:03.455590 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d"} Jan 27 21:45:03 crc kubenswrapper[4793]: I0127 21:45:03.455918 4793 scope.go:117] "RemoveContainer" containerID="6f15195504fd3eb09e91eba0a2540553c09cffe0d3212ea4be60d7fafe403ef0" Jan 27 21:45:03 crc kubenswrapper[4793]: I0127 21:45:03.459211 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:45:03 crc kubenswrapper[4793]: E0127 21:45:03.459771 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:45:03 crc kubenswrapper[4793]: I0127 21:45:03.841121 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" Jan 27 21:45:03 crc kubenswrapper[4793]: I0127 21:45:03.936111 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f539b49d-e419-4610-bfd9-d622db4abf43-secret-volume\") pod \"f539b49d-e419-4610-bfd9-d622db4abf43\" (UID: \"f539b49d-e419-4610-bfd9-d622db4abf43\") " Jan 27 21:45:03 crc kubenswrapper[4793]: I0127 21:45:03.936164 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrvh5\" (UniqueName: \"kubernetes.io/projected/f539b49d-e419-4610-bfd9-d622db4abf43-kube-api-access-nrvh5\") pod \"f539b49d-e419-4610-bfd9-d622db4abf43\" (UID: \"f539b49d-e419-4610-bfd9-d622db4abf43\") " Jan 27 21:45:03 crc kubenswrapper[4793]: I0127 21:45:03.936331 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f539b49d-e419-4610-bfd9-d622db4abf43-config-volume\") pod \"f539b49d-e419-4610-bfd9-d622db4abf43\" (UID: \"f539b49d-e419-4610-bfd9-d622db4abf43\") " Jan 27 21:45:03 crc kubenswrapper[4793]: I0127 21:45:03.937053 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f539b49d-e419-4610-bfd9-d622db4abf43-config-volume" (OuterVolumeSpecName: "config-volume") pod "f539b49d-e419-4610-bfd9-d622db4abf43" (UID: "f539b49d-e419-4610-bfd9-d622db4abf43"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 21:45:03 crc kubenswrapper[4793]: I0127 21:45:03.950335 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f539b49d-e419-4610-bfd9-d622db4abf43-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f539b49d-e419-4610-bfd9-d622db4abf43" (UID: "f539b49d-e419-4610-bfd9-d622db4abf43"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 21:45:03 crc kubenswrapper[4793]: I0127 21:45:03.950429 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f539b49d-e419-4610-bfd9-d622db4abf43-kube-api-access-nrvh5" (OuterVolumeSpecName: "kube-api-access-nrvh5") pod "f539b49d-e419-4610-bfd9-d622db4abf43" (UID: "f539b49d-e419-4610-bfd9-d622db4abf43"). InnerVolumeSpecName "kube-api-access-nrvh5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:45:04 crc kubenswrapper[4793]: I0127 21:45:04.038792 4793 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f539b49d-e419-4610-bfd9-d622db4abf43-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 21:45:04 crc kubenswrapper[4793]: I0127 21:45:04.038849 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrvh5\" (UniqueName: \"kubernetes.io/projected/f539b49d-e419-4610-bfd9-d622db4abf43-kube-api-access-nrvh5\") on node \"crc\" DevicePath \"\"" Jan 27 21:45:04 crc kubenswrapper[4793]: I0127 21:45:04.038864 4793 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f539b49d-e419-4610-bfd9-d622db4abf43-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 21:45:04 crc kubenswrapper[4793]: I0127 21:45:04.469431 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" event={"ID":"f539b49d-e419-4610-bfd9-d622db4abf43","Type":"ContainerDied","Data":"63e605c629e3b20d79345b0c377d32d091b8e68707a6f4620e8028c67e2bfb51"} Jan 27 21:45:04 crc kubenswrapper[4793]: I0127 21:45:04.469473 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr" Jan 27 21:45:04 crc kubenswrapper[4793]: I0127 21:45:04.469488 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="63e605c629e3b20d79345b0c377d32d091b8e68707a6f4620e8028c67e2bfb51" Jan 27 21:45:04 crc kubenswrapper[4793]: I0127 21:45:04.542828 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t"] Jan 27 21:45:04 crc kubenswrapper[4793]: I0127 21:45:04.551977 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492460-c4r2t"] Jan 27 21:45:05 crc kubenswrapper[4793]: I0127 21:45:05.821948 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d9c191a-345f-48b1-8968-6eb448485a65" path="/var/lib/kubelet/pods/7d9c191a-345f-48b1-8968-6eb448485a65/volumes" Jan 27 21:45:08 crc kubenswrapper[4793]: I0127 21:45:08.243062 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:45:08 crc kubenswrapper[4793]: I0127 21:45:08.244700 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:45:08 crc kubenswrapper[4793]: I0127 21:45:08.244739 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:45:08 crc kubenswrapper[4793]: I0127 21:45:08.245658 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:45:08 crc kubenswrapper[4793]: E0127 21:45:08.245973 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:45:12 crc kubenswrapper[4793]: I0127 21:45:12.809211 4793 scope.go:117] "RemoveContainer" 
containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:45:12 crc kubenswrapper[4793]: E0127 21:45:12.815813 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:45:20 crc kubenswrapper[4793]: I0127 21:45:20.922896 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wd25k"] Jan 27 21:45:20 crc kubenswrapper[4793]: E0127 21:45:20.924124 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f539b49d-e419-4610-bfd9-d622db4abf43" containerName="collect-profiles" Jan 27 21:45:20 crc kubenswrapper[4793]: I0127 21:45:20.924145 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="f539b49d-e419-4610-bfd9-d622db4abf43" containerName="collect-profiles" Jan 27 21:45:20 crc kubenswrapper[4793]: I0127 21:45:20.924395 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="f539b49d-e419-4610-bfd9-d622db4abf43" containerName="collect-profiles" Jan 27 21:45:20 crc kubenswrapper[4793]: I0127 21:45:20.928971 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wd25k" Jan 27 21:45:20 crc kubenswrapper[4793]: I0127 21:45:20.951904 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wd25k"] Jan 27 21:45:21 crc kubenswrapper[4793]: I0127 21:45:21.116599 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9wfd\" (UniqueName: \"kubernetes.io/projected/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-kube-api-access-s9wfd\") pod \"redhat-operators-wd25k\" (UID: \"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3\") " pod="openshift-marketplace/redhat-operators-wd25k" Jan 27 21:45:21 crc kubenswrapper[4793]: I0127 21:45:21.116669 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-utilities\") pod \"redhat-operators-wd25k\" (UID: \"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3\") " pod="openshift-marketplace/redhat-operators-wd25k" Jan 27 21:45:21 crc kubenswrapper[4793]: I0127 21:45:21.116824 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-catalog-content\") pod \"redhat-operators-wd25k\" (UID: \"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3\") " pod="openshift-marketplace/redhat-operators-wd25k" Jan 27 21:45:21 crc kubenswrapper[4793]: I0127 21:45:21.218729 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-catalog-content\") pod \"redhat-operators-wd25k\" (UID: \"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3\") " pod="openshift-marketplace/redhat-operators-wd25k" Jan 27 21:45:21 crc kubenswrapper[4793]: I0127 21:45:21.218796 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9wfd\" (UniqueName: 
\"kubernetes.io/projected/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-kube-api-access-s9wfd\") pod \"redhat-operators-wd25k\" (UID: \"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3\") " pod="openshift-marketplace/redhat-operators-wd25k" Jan 27 21:45:21 crc kubenswrapper[4793]: I0127 21:45:21.218830 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-utilities\") pod \"redhat-operators-wd25k\" (UID: \"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3\") " pod="openshift-marketplace/redhat-operators-wd25k" Jan 27 21:45:21 crc kubenswrapper[4793]: I0127 21:45:21.219415 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-catalog-content\") pod \"redhat-operators-wd25k\" (UID: \"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3\") " pod="openshift-marketplace/redhat-operators-wd25k" Jan 27 21:45:21 crc kubenswrapper[4793]: I0127 21:45:21.219462 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-utilities\") pod \"redhat-operators-wd25k\" (UID: \"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3\") " pod="openshift-marketplace/redhat-operators-wd25k" Jan 27 21:45:21 crc kubenswrapper[4793]: I0127 21:45:21.247597 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9wfd\" (UniqueName: \"kubernetes.io/projected/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-kube-api-access-s9wfd\") pod \"redhat-operators-wd25k\" (UID: \"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3\") " pod="openshift-marketplace/redhat-operators-wd25k" Jan 27 21:45:21 crc kubenswrapper[4793]: I0127 21:45:21.264574 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wd25k" Jan 27 21:45:21 crc kubenswrapper[4793]: I0127 21:45:21.748635 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wd25k"] Jan 27 21:45:22 crc kubenswrapper[4793]: I0127 21:45:22.699488 4793 generic.go:334] "Generic (PLEG): container finished" podID="1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3" containerID="636bea7d158c6bb5e8df3d8284ad087ae6095a900bd5f2d8cc4d4467c422a238" exitCode=0 Jan 27 21:45:22 crc kubenswrapper[4793]: I0127 21:45:22.699760 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wd25k" event={"ID":"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3","Type":"ContainerDied","Data":"636bea7d158c6bb5e8df3d8284ad087ae6095a900bd5f2d8cc4d4467c422a238"} Jan 27 21:45:22 crc kubenswrapper[4793]: I0127 21:45:22.700018 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wd25k" event={"ID":"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3","Type":"ContainerStarted","Data":"d718a18c93569edc8f837eac0aeeac78c7a789306a5f504772c9b80a47a2a924"} Jan 27 21:45:22 crc kubenswrapper[4793]: I0127 21:45:22.803774 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:45:22 crc kubenswrapper[4793]: E0127 21:45:22.804042 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:45:23 crc kubenswrapper[4793]: I0127 21:45:23.710055 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wd25k" event={"ID":"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3","Type":"ContainerStarted","Data":"6ac9db52763b2d92947ca1b1ead02f286156a9a3c6373e2bc72336b7869aebce"} Jan 27 21:45:26 crc kubenswrapper[4793]: I0127 21:45:26.738489 4793 generic.go:334] "Generic (PLEG): container finished" podID="1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3" containerID="6ac9db52763b2d92947ca1b1ead02f286156a9a3c6373e2bc72336b7869aebce" exitCode=0 Jan 27 21:45:26 crc kubenswrapper[4793]: I0127 21:45:26.738577 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wd25k" event={"ID":"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3","Type":"ContainerDied","Data":"6ac9db52763b2d92947ca1b1ead02f286156a9a3c6373e2bc72336b7869aebce"} Jan 27 21:45:27 crc kubenswrapper[4793]: I0127 21:45:27.751502 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wd25k" event={"ID":"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3","Type":"ContainerStarted","Data":"5cebbff8c9eaaef29056188e1f87f36514930a20c2636915a01edc183126f48a"} Jan 27 21:45:27 crc kubenswrapper[4793]: I0127 21:45:27.773851 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wd25k" podStartSLOduration=3.04698966 podStartE2EDuration="7.773826303s" podCreationTimestamp="2026-01-27 21:45:20 +0000 UTC" firstStartedPulling="2026-01-27 21:45:22.702190137 +0000 UTC m=+6148.092443303" lastFinishedPulling="2026-01-27 21:45:27.42902679 +0000 UTC m=+6152.819279946" observedRunningTime="2026-01-27 21:45:27.768927595 +0000 UTC m=+6153.159180751" watchObservedRunningTime="2026-01-27 21:45:27.773826303 
+0000 UTC m=+6153.164079459" Jan 27 21:45:27 crc kubenswrapper[4793]: I0127 21:45:27.803440 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:45:27 crc kubenswrapper[4793]: E0127 21:45:27.803767 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:45:31 crc kubenswrapper[4793]: I0127 21:45:31.265525 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wd25k" Jan 27 21:45:31 crc kubenswrapper[4793]: I0127 21:45:31.266175 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wd25k" Jan 27 21:45:32 crc kubenswrapper[4793]: I0127 21:45:32.324804 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wd25k" podUID="1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3" containerName="registry-server" probeResult="failure" output=< Jan 27 21:45:32 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s Jan 27 21:45:32 crc kubenswrapper[4793]: > Jan 27 21:45:33 crc kubenswrapper[4793]: I0127 21:45:33.803814 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:45:33 crc kubenswrapper[4793]: E0127 21:45:33.804329 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:45:37 crc kubenswrapper[4793]: I0127 21:45:37.557241 4793 scope.go:117] "RemoveContainer" containerID="232b60b26dcd2c979f7c2a17d3a909ca5338a10cada79794668c9de3ebf219c8" Jan 27 21:45:41 crc kubenswrapper[4793]: I0127 21:45:41.321151 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wd25k" Jan 27 21:45:41 crc kubenswrapper[4793]: I0127 21:45:41.378185 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wd25k" Jan 27 21:45:41 crc kubenswrapper[4793]: I0127 21:45:41.567314 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wd25k"] Jan 27 21:45:41 crc kubenswrapper[4793]: I0127 21:45:41.804515 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:45:41 crc kubenswrapper[4793]: E0127 21:45:41.805819 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:45:42 crc kubenswrapper[4793]: I0127 21:45:42.918697 4793 
Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.504225 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wd25k"
Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.665785 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-catalog-content\") pod \"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3\" (UID: \"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3\") "
Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.665855 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9wfd\" (UniqueName: \"kubernetes.io/projected/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-kube-api-access-s9wfd\") pod \"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3\" (UID: \"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3\") "
Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.665882 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-utilities\") pod \"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3\" (UID: \"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3\") "
Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.667161 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-utilities" (OuterVolumeSpecName: "utilities") pod "1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3" (UID: "1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.674086 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-kube-api-access-s9wfd" (OuterVolumeSpecName: "kube-api-access-s9wfd") pod "1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3" (UID: "1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3"). InnerVolumeSpecName "kube-api-access-s9wfd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.768977 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9wfd\" (UniqueName: \"kubernetes.io/projected/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-kube-api-access-s9wfd\") on node \"crc\" DevicePath \"\""
Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.769024 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.794287 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3" (UID: "1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.872763 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.929503 4793 generic.go:334] "Generic (PLEG): container finished" podID="1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3" containerID="5cebbff8c9eaaef29056188e1f87f36514930a20c2636915a01edc183126f48a" exitCode=0 Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.929666 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wd25k" event={"ID":"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3","Type":"ContainerDied","Data":"5cebbff8c9eaaef29056188e1f87f36514930a20c2636915a01edc183126f48a"} Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.929692 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wd25k" event={"ID":"1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3","Type":"ContainerDied","Data":"d718a18c93569edc8f837eac0aeeac78c7a789306a5f504772c9b80a47a2a924"} Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.929711 4793 scope.go:117] "RemoveContainer" containerID="5cebbff8c9eaaef29056188e1f87f36514930a20c2636915a01edc183126f48a" Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.929862 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wd25k" Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.989906 4793 scope.go:117] "RemoveContainer" containerID="6ac9db52763b2d92947ca1b1ead02f286156a9a3c6373e2bc72336b7869aebce" Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.991888 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qs4tl"] Jan 27 21:45:43 crc kubenswrapper[4793]: E0127 21:45:43.992743 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3" containerName="registry-server" Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.992777 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3" containerName="registry-server" Jan 27 21:45:43 crc kubenswrapper[4793]: E0127 21:45:43.992815 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3" containerName="extract-content" Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.992828 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3" containerName="extract-content" Jan 27 21:45:43 crc kubenswrapper[4793]: E0127 21:45:43.992855 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3" containerName="extract-utilities" Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.992868 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3" containerName="extract-utilities" Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.993297 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3" containerName="registry-server" Jan 27 21:45:43 crc kubenswrapper[4793]: I0127 21:45:43.996212 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.022799 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wd25k"] Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.048430 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wd25k"] Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.050926 4793 scope.go:117] "RemoveContainer" containerID="636bea7d158c6bb5e8df3d8284ad087ae6095a900bd5f2d8cc4d4467c422a238" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.061564 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qs4tl"] Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.082847 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20cdfb5e-974b-42fc-875b-d5aae3365179-catalog-content\") pod \"redhat-marketplace-qs4tl\" (UID: \"20cdfb5e-974b-42fc-875b-d5aae3365179\") " pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.082903 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fh99\" (UniqueName: \"kubernetes.io/projected/20cdfb5e-974b-42fc-875b-d5aae3365179-kube-api-access-8fh99\") pod \"redhat-marketplace-qs4tl\" (UID: \"20cdfb5e-974b-42fc-875b-d5aae3365179\") " pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.083186 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20cdfb5e-974b-42fc-875b-d5aae3365179-utilities\") pod \"redhat-marketplace-qs4tl\" (UID: \"20cdfb5e-974b-42fc-875b-d5aae3365179\") " pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.109673 4793 scope.go:117] "RemoveContainer" containerID="5cebbff8c9eaaef29056188e1f87f36514930a20c2636915a01edc183126f48a" Jan 27 21:45:44 crc kubenswrapper[4793]: E0127 21:45:44.110431 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cebbff8c9eaaef29056188e1f87f36514930a20c2636915a01edc183126f48a\": container with ID starting with 5cebbff8c9eaaef29056188e1f87f36514930a20c2636915a01edc183126f48a not found: ID does not exist" containerID="5cebbff8c9eaaef29056188e1f87f36514930a20c2636915a01edc183126f48a" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.110494 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cebbff8c9eaaef29056188e1f87f36514930a20c2636915a01edc183126f48a"} err="failed to get container status \"5cebbff8c9eaaef29056188e1f87f36514930a20c2636915a01edc183126f48a\": rpc error: code = NotFound desc = could not find container \"5cebbff8c9eaaef29056188e1f87f36514930a20c2636915a01edc183126f48a\": container with ID starting with 5cebbff8c9eaaef29056188e1f87f36514930a20c2636915a01edc183126f48a not found: ID does not exist" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.110580 4793 scope.go:117] "RemoveContainer" containerID="6ac9db52763b2d92947ca1b1ead02f286156a9a3c6373e2bc72336b7869aebce" Jan 27 21:45:44 crc kubenswrapper[4793]: E0127 21:45:44.110964 4793 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"6ac9db52763b2d92947ca1b1ead02f286156a9a3c6373e2bc72336b7869aebce\": container with ID starting with 6ac9db52763b2d92947ca1b1ead02f286156a9a3c6373e2bc72336b7869aebce not found: ID does not exist" containerID="6ac9db52763b2d92947ca1b1ead02f286156a9a3c6373e2bc72336b7869aebce" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.111062 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ac9db52763b2d92947ca1b1ead02f286156a9a3c6373e2bc72336b7869aebce"} err="failed to get container status \"6ac9db52763b2d92947ca1b1ead02f286156a9a3c6373e2bc72336b7869aebce\": rpc error: code = NotFound desc = could not find container \"6ac9db52763b2d92947ca1b1ead02f286156a9a3c6373e2bc72336b7869aebce\": container with ID starting with 6ac9db52763b2d92947ca1b1ead02f286156a9a3c6373e2bc72336b7869aebce not found: ID does not exist" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.111144 4793 scope.go:117] "RemoveContainer" containerID="636bea7d158c6bb5e8df3d8284ad087ae6095a900bd5f2d8cc4d4467c422a238" Jan 27 21:45:44 crc kubenswrapper[4793]: E0127 21:45:44.111632 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"636bea7d158c6bb5e8df3d8284ad087ae6095a900bd5f2d8cc4d4467c422a238\": container with ID starting with 636bea7d158c6bb5e8df3d8284ad087ae6095a900bd5f2d8cc4d4467c422a238 not found: ID does not exist" containerID="636bea7d158c6bb5e8df3d8284ad087ae6095a900bd5f2d8cc4d4467c422a238" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.111685 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"636bea7d158c6bb5e8df3d8284ad087ae6095a900bd5f2d8cc4d4467c422a238"} err="failed to get container status \"636bea7d158c6bb5e8df3d8284ad087ae6095a900bd5f2d8cc4d4467c422a238\": rpc error: code = NotFound desc = could not find container \"636bea7d158c6bb5e8df3d8284ad087ae6095a900bd5f2d8cc4d4467c422a238\": container with ID starting with 636bea7d158c6bb5e8df3d8284ad087ae6095a900bd5f2d8cc4d4467c422a238 not found: ID does not exist" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.185949 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20cdfb5e-974b-42fc-875b-d5aae3365179-catalog-content\") pod \"redhat-marketplace-qs4tl\" (UID: \"20cdfb5e-974b-42fc-875b-d5aae3365179\") " pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.186011 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fh99\" (UniqueName: \"kubernetes.io/projected/20cdfb5e-974b-42fc-875b-d5aae3365179-kube-api-access-8fh99\") pod \"redhat-marketplace-qs4tl\" (UID: \"20cdfb5e-974b-42fc-875b-d5aae3365179\") " pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.186094 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20cdfb5e-974b-42fc-875b-d5aae3365179-utilities\") pod \"redhat-marketplace-qs4tl\" (UID: \"20cdfb5e-974b-42fc-875b-d5aae3365179\") " pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.186669 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/20cdfb5e-974b-42fc-875b-d5aae3365179-utilities\") pod \"redhat-marketplace-qs4tl\" (UID: \"20cdfb5e-974b-42fc-875b-d5aae3365179\") " pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.186681 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20cdfb5e-974b-42fc-875b-d5aae3365179-catalog-content\") pod \"redhat-marketplace-qs4tl\" (UID: \"20cdfb5e-974b-42fc-875b-d5aae3365179\") " pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.208930 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fh99\" (UniqueName: \"kubernetes.io/projected/20cdfb5e-974b-42fc-875b-d5aae3365179-kube-api-access-8fh99\") pod \"redhat-marketplace-qs4tl\" (UID: \"20cdfb5e-974b-42fc-875b-d5aae3365179\") " pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.328193 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.865418 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qs4tl"] Jan 27 21:45:44 crc kubenswrapper[4793]: I0127 21:45:44.941426 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qs4tl" event={"ID":"20cdfb5e-974b-42fc-875b-d5aae3365179","Type":"ContainerStarted","Data":"e321d6886cb47cdf6fa84578914fa3cf6ed078c60424cd9df7702e5de44344ed"} Jan 27 21:45:45 crc kubenswrapper[4793]: I0127 21:45:45.817002 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3" path="/var/lib/kubelet/pods/1ca1d8f6-2bc3-4a33-9ab6-e54920a528c3/volumes" Jan 27 21:45:45 crc kubenswrapper[4793]: I0127 21:45:45.955988 4793 generic.go:334] "Generic (PLEG): container finished" podID="20cdfb5e-974b-42fc-875b-d5aae3365179" containerID="0ad02cceb8710723d4337de8ac293ba33f4b06e87778d5deb9eefb2821363fec" exitCode=0 Jan 27 21:45:45 crc kubenswrapper[4793]: I0127 21:45:45.956065 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qs4tl" event={"ID":"20cdfb5e-974b-42fc-875b-d5aae3365179","Type":"ContainerDied","Data":"0ad02cceb8710723d4337de8ac293ba33f4b06e87778d5deb9eefb2821363fec"} Jan 27 21:45:47 crc kubenswrapper[4793]: I0127 21:45:47.803509 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:45:47 crc kubenswrapper[4793]: E0127 21:45:47.804115 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:45:47 crc kubenswrapper[4793]: I0127 21:45:47.977727 4793 generic.go:334] "Generic (PLEG): container finished" podID="20cdfb5e-974b-42fc-875b-d5aae3365179" containerID="530541292ad9f73c979c4b01d459a95f217a7901ea30139a2222c1b5cdeda06e" exitCode=0 Jan 27 21:45:47 crc kubenswrapper[4793]: I0127 21:45:47.977767 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qs4tl" 
event={"ID":"20cdfb5e-974b-42fc-875b-d5aae3365179","Type":"ContainerDied","Data":"530541292ad9f73c979c4b01d459a95f217a7901ea30139a2222c1b5cdeda06e"} Jan 27 21:45:48 crc kubenswrapper[4793]: I0127 21:45:48.989397 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qs4tl" event={"ID":"20cdfb5e-974b-42fc-875b-d5aae3365179","Type":"ContainerStarted","Data":"c5696776549839c18c564fbd50b7d7fda83b66be2ff70b5483aee206731442dd"} Jan 27 21:45:54 crc kubenswrapper[4793]: I0127 21:45:54.328375 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:54 crc kubenswrapper[4793]: I0127 21:45:54.328945 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:54 crc kubenswrapper[4793]: I0127 21:45:54.380881 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:54 crc kubenswrapper[4793]: I0127 21:45:54.406865 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qs4tl" podStartSLOduration=8.867250761 podStartE2EDuration="11.406831483s" podCreationTimestamp="2026-01-27 21:45:43 +0000 UTC" firstStartedPulling="2026-01-27 21:45:45.95852794 +0000 UTC m=+6171.348781096" lastFinishedPulling="2026-01-27 21:45:48.498108632 +0000 UTC m=+6173.888361818" observedRunningTime="2026-01-27 21:45:49.011318757 +0000 UTC m=+6174.401571923" watchObservedRunningTime="2026-01-27 21:45:54.406831483 +0000 UTC m=+6179.797084679" Jan 27 21:45:55 crc kubenswrapper[4793]: I0127 21:45:55.147778 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:56 crc kubenswrapper[4793]: I0127 21:45:56.804350 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:45:56 crc kubenswrapper[4793]: E0127 21:45:56.805148 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:45:57 crc kubenswrapper[4793]: I0127 21:45:57.965712 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qs4tl"] Jan 27 21:45:57 crc kubenswrapper[4793]: I0127 21:45:57.966255 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qs4tl" podUID="20cdfb5e-974b-42fc-875b-d5aae3365179" containerName="registry-server" containerID="cri-o://c5696776549839c18c564fbd50b7d7fda83b66be2ff70b5483aee206731442dd" gracePeriod=2 Jan 27 21:45:58 crc kubenswrapper[4793]: I0127 21:45:58.125561 4793 generic.go:334] "Generic (PLEG): container finished" podID="20cdfb5e-974b-42fc-875b-d5aae3365179" containerID="c5696776549839c18c564fbd50b7d7fda83b66be2ff70b5483aee206731442dd" exitCode=0 Jan 27 21:45:58 crc kubenswrapper[4793]: I0127 21:45:58.125614 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qs4tl" 
event={"ID":"20cdfb5e-974b-42fc-875b-d5aae3365179","Type":"ContainerDied","Data":"c5696776549839c18c564fbd50b7d7fda83b66be2ff70b5483aee206731442dd"} Jan 27 21:45:58 crc kubenswrapper[4793]: I0127 21:45:58.553121 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:58 crc kubenswrapper[4793]: I0127 21:45:58.576806 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8fh99\" (UniqueName: \"kubernetes.io/projected/20cdfb5e-974b-42fc-875b-d5aae3365179-kube-api-access-8fh99\") pod \"20cdfb5e-974b-42fc-875b-d5aae3365179\" (UID: \"20cdfb5e-974b-42fc-875b-d5aae3365179\") " Jan 27 21:45:58 crc kubenswrapper[4793]: I0127 21:45:58.577032 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20cdfb5e-974b-42fc-875b-d5aae3365179-utilities\") pod \"20cdfb5e-974b-42fc-875b-d5aae3365179\" (UID: \"20cdfb5e-974b-42fc-875b-d5aae3365179\") " Jan 27 21:45:58 crc kubenswrapper[4793]: I0127 21:45:58.577068 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20cdfb5e-974b-42fc-875b-d5aae3365179-catalog-content\") pod \"20cdfb5e-974b-42fc-875b-d5aae3365179\" (UID: \"20cdfb5e-974b-42fc-875b-d5aae3365179\") " Jan 27 21:45:58 crc kubenswrapper[4793]: I0127 21:45:58.577784 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20cdfb5e-974b-42fc-875b-d5aae3365179-utilities" (OuterVolumeSpecName: "utilities") pod "20cdfb5e-974b-42fc-875b-d5aae3365179" (UID: "20cdfb5e-974b-42fc-875b-d5aae3365179"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:45:58 crc kubenswrapper[4793]: I0127 21:45:58.588707 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20cdfb5e-974b-42fc-875b-d5aae3365179-kube-api-access-8fh99" (OuterVolumeSpecName: "kube-api-access-8fh99") pod "20cdfb5e-974b-42fc-875b-d5aae3365179" (UID: "20cdfb5e-974b-42fc-875b-d5aae3365179"). InnerVolumeSpecName "kube-api-access-8fh99". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:45:58 crc kubenswrapper[4793]: I0127 21:45:58.605719 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20cdfb5e-974b-42fc-875b-d5aae3365179-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "20cdfb5e-974b-42fc-875b-d5aae3365179" (UID: "20cdfb5e-974b-42fc-875b-d5aae3365179"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:45:58 crc kubenswrapper[4793]: I0127 21:45:58.782974 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20cdfb5e-974b-42fc-875b-d5aae3365179-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 21:45:58 crc kubenswrapper[4793]: I0127 21:45:58.783002 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20cdfb5e-974b-42fc-875b-d5aae3365179-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 21:45:58 crc kubenswrapper[4793]: I0127 21:45:58.783018 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8fh99\" (UniqueName: \"kubernetes.io/projected/20cdfb5e-974b-42fc-875b-d5aae3365179-kube-api-access-8fh99\") on node \"crc\" DevicePath \"\"" Jan 27 21:45:58 crc kubenswrapper[4793]: I0127 21:45:58.803479 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:45:58 crc kubenswrapper[4793]: E0127 21:45:58.803963 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:45:59 crc kubenswrapper[4793]: I0127 21:45:59.142050 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qs4tl" event={"ID":"20cdfb5e-974b-42fc-875b-d5aae3365179","Type":"ContainerDied","Data":"e321d6886cb47cdf6fa84578914fa3cf6ed078c60424cd9df7702e5de44344ed"} Jan 27 21:45:59 crc kubenswrapper[4793]: I0127 21:45:59.142365 4793 scope.go:117] "RemoveContainer" containerID="c5696776549839c18c564fbd50b7d7fda83b66be2ff70b5483aee206731442dd" Jan 27 21:45:59 crc kubenswrapper[4793]: I0127 21:45:59.142135 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qs4tl" Jan 27 21:45:59 crc kubenswrapper[4793]: I0127 21:45:59.164837 4793 scope.go:117] "RemoveContainer" containerID="530541292ad9f73c979c4b01d459a95f217a7901ea30139a2222c1b5cdeda06e" Jan 27 21:45:59 crc kubenswrapper[4793]: I0127 21:45:59.189524 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qs4tl"] Jan 27 21:45:59 crc kubenswrapper[4793]: I0127 21:45:59.191923 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qs4tl"] Jan 27 21:45:59 crc kubenswrapper[4793]: I0127 21:45:59.221391 4793 scope.go:117] "RemoveContainer" containerID="0ad02cceb8710723d4337de8ac293ba33f4b06e87778d5deb9eefb2821363fec" Jan 27 21:45:59 crc kubenswrapper[4793]: I0127 21:45:59.814476 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20cdfb5e-974b-42fc-875b-d5aae3365179" path="/var/lib/kubelet/pods/20cdfb5e-974b-42fc-875b-d5aae3365179/volumes" Jan 27 21:46:07 crc kubenswrapper[4793]: I0127 21:46:07.804391 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:46:07 crc kubenswrapper[4793]: E0127 21:46:07.805182 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:46:09 crc kubenswrapper[4793]: I0127 21:46:09.803457 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:46:09 crc kubenswrapper[4793]: E0127 21:46:09.804006 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:46:18 crc kubenswrapper[4793]: I0127 21:46:18.804321 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:46:18 crc kubenswrapper[4793]: E0127 21:46:18.805116 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:46:24 crc kubenswrapper[4793]: I0127 21:46:24.802951 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:46:24 crc kubenswrapper[4793]: E0127 21:46:24.804107 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" 
podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:46:33 crc kubenswrapper[4793]: I0127 21:46:33.804898 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:46:33 crc kubenswrapper[4793]: E0127 21:46:33.806367 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:46:35 crc kubenswrapper[4793]: I0127 21:46:35.810044 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:46:35 crc kubenswrapper[4793]: E0127 21:46:35.810500 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:46:45 crc kubenswrapper[4793]: I0127 21:46:45.817987 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:46:45 crc kubenswrapper[4793]: E0127 21:46:45.819327 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:46:50 crc kubenswrapper[4793]: I0127 21:46:50.883616 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:46:50 crc kubenswrapper[4793]: E0127 21:46:50.884325 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:46:56 crc kubenswrapper[4793]: I0127 21:46:56.804045 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:46:56 crc kubenswrapper[4793]: E0127 21:46:56.805509 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:47:02 crc kubenswrapper[4793]: I0127 21:47:02.803864 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:47:02 crc kubenswrapper[4793]: E0127 21:47:02.805127 4793 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:47:09 crc kubenswrapper[4793]: I0127 21:47:09.803948 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:47:09 crc kubenswrapper[4793]: E0127 21:47:09.806208 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:47:14 crc kubenswrapper[4793]: I0127 21:47:14.803310 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:47:14 crc kubenswrapper[4793]: E0127 21:47:14.804362 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:47:23 crc kubenswrapper[4793]: I0127 21:47:23.850836 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:47:23 crc kubenswrapper[4793]: E0127 21:47:23.857279 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:47:29 crc kubenswrapper[4793]: I0127 21:47:29.803787 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:47:29 crc kubenswrapper[4793]: E0127 21:47:29.804794 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:47:34 crc kubenswrapper[4793]: I0127 21:47:34.803779 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:47:34 crc kubenswrapper[4793]: E0127 21:47:34.804883 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:47:43 crc kubenswrapper[4793]: I0127 
Jan 27 21:47:43 crc kubenswrapper[4793]: E0127 21:47:43.806094 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:47:45 crc kubenswrapper[4793]: I0127 21:47:45.817980 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0"
Jan 27 21:47:45 crc kubenswrapper[4793]: E0127 21:47:45.819884 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:47:57 crc kubenswrapper[4793]: I0127 21:47:57.805716 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d"
Jan 27 21:47:57 crc kubenswrapper[4793]: E0127 21:47:57.806759 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:47:58 crc kubenswrapper[4793]: I0127 21:47:58.803897 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0"
Jan 27 21:47:59 crc kubenswrapper[4793]: I0127 21:47:59.580559 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"5652ae133c279bea8059fc37b4a1b976334ddc61cf5c85d8b44e502e31a49059"}
Jan 27 21:48:08 crc kubenswrapper[4793]: I0127 21:48:08.805715 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d"
Jan 27 21:48:08 crc kubenswrapper[4793]: E0127 21:48:08.806451 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:48:22 crc kubenswrapper[4793]: I0127 21:48:22.804321 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d"
Jan 27 21:48:22 crc kubenswrapper[4793]: E0127 21:48:22.805456 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:48:33 crc kubenswrapper[4793]: I0127 21:48:33.804214 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d"
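Note: at 21:47:58 the "RemoveContainer" for the machine-config-daemon is, for the first time in this stretch, not followed by a back-off error, and one second later PLEG reports ContainerStarted ("5652ae...") for machine-config-daemon-gq8gn: its back-off window had expired, so the kubelet recreated the container. From here on only watcher-applier keeps cycling through the 5m back-off.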
Jan 27 21:48:33 crc kubenswrapper[4793]: E0127 21:48:33.804934 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:48:45 crc kubenswrapper[4793]: I0127 21:48:45.817457 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d"
Jan 27 21:48:45 crc kubenswrapper[4793]: E0127 21:48:45.820747 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:48:58 crc kubenswrapper[4793]: I0127 21:48:58.804137 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d"
Jan 27 21:48:58 crc kubenswrapper[4793]: E0127 21:48:58.804963 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.081301 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vvzkn"]
Jan 27 21:49:03 crc kubenswrapper[4793]: E0127 21:49:03.082787 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20cdfb5e-974b-42fc-875b-d5aae3365179" containerName="registry-server"
Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.082803 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="20cdfb5e-974b-42fc-875b-d5aae3365179" containerName="registry-server"
Jan 27 21:49:03 crc kubenswrapper[4793]: E0127 21:49:03.082828 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20cdfb5e-974b-42fc-875b-d5aae3365179" containerName="extract-utilities"
Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.082836 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="20cdfb5e-974b-42fc-875b-d5aae3365179" containerName="extract-utilities"
Jan 27 21:49:03 crc kubenswrapper[4793]: E0127 21:49:03.082859 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20cdfb5e-974b-42fc-875b-d5aae3365179" containerName="extract-content"
Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.082865 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="20cdfb5e-974b-42fc-875b-d5aae3365179" containerName="extract-content"
Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.083098 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="20cdfb5e-974b-42fc-875b-d5aae3365179" containerName="registry-server"
Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.084831 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vvzkn"
Need to start a new one" pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.099339 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vvzkn"] Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.239752 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d704af2e-f278-46ed-b66c-f170725cd24f-utilities\") pod \"certified-operators-vvzkn\" (UID: \"d704af2e-f278-46ed-b66c-f170725cd24f\") " pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.239810 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lswsx\" (UniqueName: \"kubernetes.io/projected/d704af2e-f278-46ed-b66c-f170725cd24f-kube-api-access-lswsx\") pod \"certified-operators-vvzkn\" (UID: \"d704af2e-f278-46ed-b66c-f170725cd24f\") " pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.240172 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d704af2e-f278-46ed-b66c-f170725cd24f-catalog-content\") pod \"certified-operators-vvzkn\" (UID: \"d704af2e-f278-46ed-b66c-f170725cd24f\") " pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.342861 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d704af2e-f278-46ed-b66c-f170725cd24f-utilities\") pod \"certified-operators-vvzkn\" (UID: \"d704af2e-f278-46ed-b66c-f170725cd24f\") " pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.342919 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lswsx\" (UniqueName: \"kubernetes.io/projected/d704af2e-f278-46ed-b66c-f170725cd24f-kube-api-access-lswsx\") pod \"certified-operators-vvzkn\" (UID: \"d704af2e-f278-46ed-b66c-f170725cd24f\") " pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.343020 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d704af2e-f278-46ed-b66c-f170725cd24f-catalog-content\") pod \"certified-operators-vvzkn\" (UID: \"d704af2e-f278-46ed-b66c-f170725cd24f\") " pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.343598 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d704af2e-f278-46ed-b66c-f170725cd24f-utilities\") pod \"certified-operators-vvzkn\" (UID: \"d704af2e-f278-46ed-b66c-f170725cd24f\") " pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.344070 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d704af2e-f278-46ed-b66c-f170725cd24f-catalog-content\") pod \"certified-operators-vvzkn\" (UID: \"d704af2e-f278-46ed-b66c-f170725cd24f\") " pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.367366 4793 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-lswsx\" (UniqueName: \"kubernetes.io/projected/d704af2e-f278-46ed-b66c-f170725cd24f-kube-api-access-lswsx\") pod \"certified-operators-vvzkn\" (UID: \"d704af2e-f278-46ed-b66c-f170725cd24f\") " pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:03 crc kubenswrapper[4793]: I0127 21:49:03.417906 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:03 crc kubenswrapper[4793]: W0127 21:49:03.994296 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd704af2e_f278_46ed_b66c_f170725cd24f.slice/crio-e2d188db7886714575c832060cf945712e6cf039c7893ed66127d7efba07293c WatchSource:0}: Error finding container e2d188db7886714575c832060cf945712e6cf039c7893ed66127d7efba07293c: Status 404 returned error can't find the container with id e2d188db7886714575c832060cf945712e6cf039c7893ed66127d7efba07293c Jan 27 21:49:04 crc kubenswrapper[4793]: I0127 21:49:04.007883 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vvzkn"] Jan 27 21:49:04 crc kubenswrapper[4793]: I0127 21:49:04.806513 4793 generic.go:334] "Generic (PLEG): container finished" podID="d704af2e-f278-46ed-b66c-f170725cd24f" containerID="94207473831d984aa516ecc9ceb91cdf51ec02f93bb3726e62ba9ab60b64fc0f" exitCode=0 Jan 27 21:49:04 crc kubenswrapper[4793]: I0127 21:49:04.806609 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vvzkn" event={"ID":"d704af2e-f278-46ed-b66c-f170725cd24f","Type":"ContainerDied","Data":"94207473831d984aa516ecc9ceb91cdf51ec02f93bb3726e62ba9ab60b64fc0f"} Jan 27 21:49:04 crc kubenswrapper[4793]: I0127 21:49:04.808778 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 21:49:04 crc kubenswrapper[4793]: I0127 21:49:04.809818 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vvzkn" event={"ID":"d704af2e-f278-46ed-b66c-f170725cd24f","Type":"ContainerStarted","Data":"e2d188db7886714575c832060cf945712e6cf039c7893ed66127d7efba07293c"} Jan 27 21:49:05 crc kubenswrapper[4793]: I0127 21:49:05.829588 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vvzkn" event={"ID":"d704af2e-f278-46ed-b66c-f170725cd24f","Type":"ContainerStarted","Data":"bccebfcdb0a8d8478a32d8deaa514f193247135b8d0b286a0c842dd7461ac352"} Jan 27 21:49:06 crc kubenswrapper[4793]: I0127 21:49:06.837872 4793 generic.go:334] "Generic (PLEG): container finished" podID="d704af2e-f278-46ed-b66c-f170725cd24f" containerID="bccebfcdb0a8d8478a32d8deaa514f193247135b8d0b286a0c842dd7461ac352" exitCode=0 Jan 27 21:49:06 crc kubenswrapper[4793]: I0127 21:49:06.837954 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vvzkn" event={"ID":"d704af2e-f278-46ed-b66c-f170725cd24f","Type":"ContainerDied","Data":"bccebfcdb0a8d8478a32d8deaa514f193247135b8d0b286a0c842dd7461ac352"} Jan 27 21:49:07 crc kubenswrapper[4793]: I0127 21:49:07.855177 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vvzkn" event={"ID":"d704af2e-f278-46ed-b66c-f170725cd24f","Type":"ContainerStarted","Data":"19d0df03f5ca5872fb878a2bdfd26b9cab0e961fa34c64aa48abd5eeaf23aa55"} Jan 27 21:49:07 crc kubenswrapper[4793]: I0127 
21:49:07.885243 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vvzkn" podStartSLOduration=2.478325212 podStartE2EDuration="4.885221983s" podCreationTimestamp="2026-01-27 21:49:03 +0000 UTC" firstStartedPulling="2026-01-27 21:49:04.808253824 +0000 UTC m=+6370.198507000" lastFinishedPulling="2026-01-27 21:49:07.215150615 +0000 UTC m=+6372.605403771" observedRunningTime="2026-01-27 21:49:07.881249017 +0000 UTC m=+6373.271502223" watchObservedRunningTime="2026-01-27 21:49:07.885221983 +0000 UTC m=+6373.275475139" Jan 27 21:49:09 crc kubenswrapper[4793]: I0127 21:49:09.817574 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:49:09 crc kubenswrapper[4793]: E0127 21:49:09.819082 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:49:13 crc kubenswrapper[4793]: I0127 21:49:13.418280 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:13 crc kubenswrapper[4793]: I0127 21:49:13.419708 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:13 crc kubenswrapper[4793]: I0127 21:49:13.469423 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:13 crc kubenswrapper[4793]: I0127 21:49:13.998919 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:14 crc kubenswrapper[4793]: I0127 21:49:14.057760 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vvzkn"] Jan 27 21:49:15 crc kubenswrapper[4793]: I0127 21:49:15.948776 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vvzkn" podUID="d704af2e-f278-46ed-b66c-f170725cd24f" containerName="registry-server" containerID="cri-o://19d0df03f5ca5872fb878a2bdfd26b9cab0e961fa34c64aa48abd5eeaf23aa55" gracePeriod=2 Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.452282 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.552685 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lswsx\" (UniqueName: \"kubernetes.io/projected/d704af2e-f278-46ed-b66c-f170725cd24f-kube-api-access-lswsx\") pod \"d704af2e-f278-46ed-b66c-f170725cd24f\" (UID: \"d704af2e-f278-46ed-b66c-f170725cd24f\") " Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.554710 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d704af2e-f278-46ed-b66c-f170725cd24f-catalog-content\") pod \"d704af2e-f278-46ed-b66c-f170725cd24f\" (UID: \"d704af2e-f278-46ed-b66c-f170725cd24f\") " Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.554895 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d704af2e-f278-46ed-b66c-f170725cd24f-utilities\") pod \"d704af2e-f278-46ed-b66c-f170725cd24f\" (UID: \"d704af2e-f278-46ed-b66c-f170725cd24f\") " Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.556852 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d704af2e-f278-46ed-b66c-f170725cd24f-utilities" (OuterVolumeSpecName: "utilities") pod "d704af2e-f278-46ed-b66c-f170725cd24f" (UID: "d704af2e-f278-46ed-b66c-f170725cd24f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.574861 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d704af2e-f278-46ed-b66c-f170725cd24f-kube-api-access-lswsx" (OuterVolumeSpecName: "kube-api-access-lswsx") pod "d704af2e-f278-46ed-b66c-f170725cd24f" (UID: "d704af2e-f278-46ed-b66c-f170725cd24f"). InnerVolumeSpecName "kube-api-access-lswsx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.652043 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d704af2e-f278-46ed-b66c-f170725cd24f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d704af2e-f278-46ed-b66c-f170725cd24f" (UID: "d704af2e-f278-46ed-b66c-f170725cd24f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.659047 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d704af2e-f278-46ed-b66c-f170725cd24f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.659088 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d704af2e-f278-46ed-b66c-f170725cd24f-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.659101 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lswsx\" (UniqueName: \"kubernetes.io/projected/d704af2e-f278-46ed-b66c-f170725cd24f-kube-api-access-lswsx\") on node \"crc\" DevicePath \"\"" Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.962015 4793 generic.go:334] "Generic (PLEG): container finished" podID="d704af2e-f278-46ed-b66c-f170725cd24f" containerID="19d0df03f5ca5872fb878a2bdfd26b9cab0e961fa34c64aa48abd5eeaf23aa55" exitCode=0 Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.962079 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vvzkn" Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.962081 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vvzkn" event={"ID":"d704af2e-f278-46ed-b66c-f170725cd24f","Type":"ContainerDied","Data":"19d0df03f5ca5872fb878a2bdfd26b9cab0e961fa34c64aa48abd5eeaf23aa55"} Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.962238 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vvzkn" event={"ID":"d704af2e-f278-46ed-b66c-f170725cd24f","Type":"ContainerDied","Data":"e2d188db7886714575c832060cf945712e6cf039c7893ed66127d7efba07293c"} Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.962276 4793 scope.go:117] "RemoveContainer" containerID="19d0df03f5ca5872fb878a2bdfd26b9cab0e961fa34c64aa48abd5eeaf23aa55" Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.997867 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vvzkn"] Jan 27 21:49:16 crc kubenswrapper[4793]: I0127 21:49:16.999311 4793 scope.go:117] "RemoveContainer" containerID="bccebfcdb0a8d8478a32d8deaa514f193247135b8d0b286a0c842dd7461ac352" Jan 27 21:49:17 crc kubenswrapper[4793]: I0127 21:49:17.010299 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vvzkn"] Jan 27 21:49:17 crc kubenswrapper[4793]: I0127 21:49:17.023515 4793 scope.go:117] "RemoveContainer" containerID="94207473831d984aa516ecc9ceb91cdf51ec02f93bb3726e62ba9ab60b64fc0f" Jan 27 21:49:17 crc kubenswrapper[4793]: I0127 21:49:17.073076 4793 scope.go:117] "RemoveContainer" containerID="19d0df03f5ca5872fb878a2bdfd26b9cab0e961fa34c64aa48abd5eeaf23aa55" Jan 27 21:49:17 crc kubenswrapper[4793]: E0127 21:49:17.073652 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19d0df03f5ca5872fb878a2bdfd26b9cab0e961fa34c64aa48abd5eeaf23aa55\": container with ID starting with 19d0df03f5ca5872fb878a2bdfd26b9cab0e961fa34c64aa48abd5eeaf23aa55 not found: ID does not exist" containerID="19d0df03f5ca5872fb878a2bdfd26b9cab0e961fa34c64aa48abd5eeaf23aa55" Jan 27 21:49:17 crc kubenswrapper[4793]: I0127 21:49:17.073694 
4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19d0df03f5ca5872fb878a2bdfd26b9cab0e961fa34c64aa48abd5eeaf23aa55"} err="failed to get container status \"19d0df03f5ca5872fb878a2bdfd26b9cab0e961fa34c64aa48abd5eeaf23aa55\": rpc error: code = NotFound desc = could not find container \"19d0df03f5ca5872fb878a2bdfd26b9cab0e961fa34c64aa48abd5eeaf23aa55\": container with ID starting with 19d0df03f5ca5872fb878a2bdfd26b9cab0e961fa34c64aa48abd5eeaf23aa55 not found: ID does not exist" Jan 27 21:49:17 crc kubenswrapper[4793]: I0127 21:49:17.073724 4793 scope.go:117] "RemoveContainer" containerID="bccebfcdb0a8d8478a32d8deaa514f193247135b8d0b286a0c842dd7461ac352" Jan 27 21:49:17 crc kubenswrapper[4793]: E0127 21:49:17.074008 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bccebfcdb0a8d8478a32d8deaa514f193247135b8d0b286a0c842dd7461ac352\": container with ID starting with bccebfcdb0a8d8478a32d8deaa514f193247135b8d0b286a0c842dd7461ac352 not found: ID does not exist" containerID="bccebfcdb0a8d8478a32d8deaa514f193247135b8d0b286a0c842dd7461ac352" Jan 27 21:49:17 crc kubenswrapper[4793]: I0127 21:49:17.074028 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bccebfcdb0a8d8478a32d8deaa514f193247135b8d0b286a0c842dd7461ac352"} err="failed to get container status \"bccebfcdb0a8d8478a32d8deaa514f193247135b8d0b286a0c842dd7461ac352\": rpc error: code = NotFound desc = could not find container \"bccebfcdb0a8d8478a32d8deaa514f193247135b8d0b286a0c842dd7461ac352\": container with ID starting with bccebfcdb0a8d8478a32d8deaa514f193247135b8d0b286a0c842dd7461ac352 not found: ID does not exist" Jan 27 21:49:17 crc kubenswrapper[4793]: I0127 21:49:17.074041 4793 scope.go:117] "RemoveContainer" containerID="94207473831d984aa516ecc9ceb91cdf51ec02f93bb3726e62ba9ab60b64fc0f" Jan 27 21:49:17 crc kubenswrapper[4793]: E0127 21:49:17.074296 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94207473831d984aa516ecc9ceb91cdf51ec02f93bb3726e62ba9ab60b64fc0f\": container with ID starting with 94207473831d984aa516ecc9ceb91cdf51ec02f93bb3726e62ba9ab60b64fc0f not found: ID does not exist" containerID="94207473831d984aa516ecc9ceb91cdf51ec02f93bb3726e62ba9ab60b64fc0f" Jan 27 21:49:17 crc kubenswrapper[4793]: I0127 21:49:17.074314 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94207473831d984aa516ecc9ceb91cdf51ec02f93bb3726e62ba9ab60b64fc0f"} err="failed to get container status \"94207473831d984aa516ecc9ceb91cdf51ec02f93bb3726e62ba9ab60b64fc0f\": rpc error: code = NotFound desc = could not find container \"94207473831d984aa516ecc9ceb91cdf51ec02f93bb3726e62ba9ab60b64fc0f\": container with ID starting with 94207473831d984aa516ecc9ceb91cdf51ec02f93bb3726e62ba9ab60b64fc0f not found: ID does not exist" Jan 27 21:49:17 crc kubenswrapper[4793]: I0127 21:49:17.813694 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d704af2e-f278-46ed-b66c-f170725cd24f" path="/var/lib/kubelet/pods/d704af2e-f278-46ed-b66c-f170725cd24f/volumes" Jan 27 21:49:24 crc kubenswrapper[4793]: I0127 21:49:24.803397 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:49:24 crc kubenswrapper[4793]: E0127 21:49:24.804041 4793 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:49:36 crc kubenswrapper[4793]: I0127 21:49:36.804169 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:49:36 crc kubenswrapper[4793]: E0127 21:49:36.805007 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:49:50 crc kubenswrapper[4793]: I0127 21:49:50.803984 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:49:50 crc kubenswrapper[4793]: E0127 21:49:50.804872 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:50:04 crc kubenswrapper[4793]: I0127 21:50:04.804188 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:50:05 crc kubenswrapper[4793]: I0127 21:50:05.697546 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a"} Jan 27 21:50:08 crc kubenswrapper[4793]: I0127 21:50:08.243001 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 21:50:08 crc kubenswrapper[4793]: I0127 21:50:08.243600 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:50:08 crc kubenswrapper[4793]: E0127 21:50:08.243636 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a is running failed: container process not found" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 27 21:50:08 crc kubenswrapper[4793]: E0127 21:50:08.244096 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a is running failed: container process not found" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 27 21:50:08 crc kubenswrapper[4793]: E0127 21:50:08.244522 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a is running failed: container process not found" 
containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 27 21:50:08 crc kubenswrapper[4793]: E0127 21:50:08.244587 4793 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a is running failed: container process not found" probeType="Startup" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerName="watcher-applier" Jan 27 21:50:08 crc kubenswrapper[4793]: I0127 21:50:08.728421 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" exitCode=1 Jan 27 21:50:08 crc kubenswrapper[4793]: I0127 21:50:08.728461 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a"} Jan 27 21:50:08 crc kubenswrapper[4793]: I0127 21:50:08.728495 4793 scope.go:117] "RemoveContainer" containerID="b71038d7b7a424ed9d590c8d176571e67dfbf6904bd443626777f272e3bf3a4d" Jan 27 21:50:08 crc kubenswrapper[4793]: I0127 21:50:08.729343 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:50:08 crc kubenswrapper[4793]: E0127 21:50:08.729774 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:50:18 crc kubenswrapper[4793]: I0127 21:50:18.242842 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:50:18 crc kubenswrapper[4793]: I0127 21:50:18.243369 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 21:50:18 crc kubenswrapper[4793]: I0127 21:50:18.244181 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:50:18 crc kubenswrapper[4793]: E0127 21:50:18.244413 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:50:22 crc kubenswrapper[4793]: I0127 21:50:22.754141 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:50:22 crc kubenswrapper[4793]: I0127 21:50:22.754776 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" Jan 27 21:50:32 crc kubenswrapper[4793]: I0127 21:50:32.803426 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:50:32 crc kubenswrapper[4793]: E0127 21:50:32.804226 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:50:46 crc kubenswrapper[4793]: I0127 21:50:46.804041 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:50:46 crc kubenswrapper[4793]: E0127 21:50:46.804902 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:50:52 crc kubenswrapper[4793]: I0127 21:50:52.753444 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:50:52 crc kubenswrapper[4793]: I0127 21:50:52.754155 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:50:58 crc kubenswrapper[4793]: I0127 21:50:58.803833 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:50:58 crc kubenswrapper[4793]: E0127 21:50:58.804892 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:51:10 crc kubenswrapper[4793]: I0127 21:51:10.803366 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:51:10 crc kubenswrapper[4793]: E0127 21:51:10.804293 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:51:22 crc kubenswrapper[4793]: I0127 21:51:22.754088 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:51:22 crc kubenswrapper[4793]: 
I0127 21:51:22.754507 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:51:22 crc kubenswrapper[4793]: I0127 21:51:22.754598 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 21:51:22 crc kubenswrapper[4793]: I0127 21:51:22.755371 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5652ae133c279bea8059fc37b4a1b976334ddc61cf5c85d8b44e502e31a49059"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 21:51:22 crc kubenswrapper[4793]: I0127 21:51:22.755423 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://5652ae133c279bea8059fc37b4a1b976334ddc61cf5c85d8b44e502e31a49059" gracePeriod=600 Jan 27 21:51:23 crc kubenswrapper[4793]: I0127 21:51:23.609542 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="5652ae133c279bea8059fc37b4a1b976334ddc61cf5c85d8b44e502e31a49059" exitCode=0 Jan 27 21:51:23 crc kubenswrapper[4793]: I0127 21:51:23.609582 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"5652ae133c279bea8059fc37b4a1b976334ddc61cf5c85d8b44e502e31a49059"} Jan 27 21:51:23 crc kubenswrapper[4793]: I0127 21:51:23.610196 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"} Jan 27 21:51:23 crc kubenswrapper[4793]: I0127 21:51:23.610226 4793 scope.go:117] "RemoveContainer" containerID="efb708e4818f18015e34c4194a722f8bcff9e811a2164abecad1be0f6f27d4c0" Jan 27 21:51:24 crc kubenswrapper[4793]: I0127 21:51:24.803401 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:51:24 crc kubenswrapper[4793]: E0127 21:51:24.804811 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:51:37 crc kubenswrapper[4793]: I0127 21:51:37.806279 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:51:37 crc kubenswrapper[4793]: E0127 21:51:37.808150 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier 
pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:51:50 crc kubenswrapper[4793]: I0127 21:51:50.804500 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:51:50 crc kubenswrapper[4793]: E0127 21:51:50.805910 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:52:03 crc kubenswrapper[4793]: I0127 21:52:03.804022 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:52:03 crc kubenswrapper[4793]: E0127 21:52:03.804784 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:52:16 crc kubenswrapper[4793]: I0127 21:52:16.802925 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:52:16 crc kubenswrapper[4793]: E0127 21:52:16.804781 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:52:30 crc kubenswrapper[4793]: I0127 21:52:30.804341 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:52:30 crc kubenswrapper[4793]: E0127 21:52:30.805780 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:52:42 crc kubenswrapper[4793]: I0127 21:52:42.803863 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:52:42 crc kubenswrapper[4793]: E0127 21:52:42.804808 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:52:53 crc kubenswrapper[4793]: I0127 21:52:53.803941 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:52:53 crc kubenswrapper[4793]: E0127 21:52:53.809074 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier 
pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:53:06 crc kubenswrapper[4793]: I0127 21:53:06.804604 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:53:06 crc kubenswrapper[4793]: E0127 21:53:06.805953 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:53:21 crc kubenswrapper[4793]: I0127 21:53:21.803738 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:53:21 crc kubenswrapper[4793]: E0127 21:53:21.804686 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:53:34 crc kubenswrapper[4793]: I0127 21:53:34.803831 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:53:34 crc kubenswrapper[4793]: E0127 21:53:34.804932 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:53:47 crc kubenswrapper[4793]: I0127 21:53:47.804503 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:53:47 crc kubenswrapper[4793]: E0127 21:53:47.805875 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:53:52 crc kubenswrapper[4793]: I0127 21:53:52.753197 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:53:52 crc kubenswrapper[4793]: I0127 21:53:52.754844 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:53:59 crc kubenswrapper[4793]: I0127 21:53:59.804227 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:53:59 crc kubenswrapper[4793]: E0127 21:53:59.805749 4793 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:54:13 crc kubenswrapper[4793]: I0127 21:54:13.803987 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:54:13 crc kubenswrapper[4793]: E0127 21:54:13.805340 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:54:22 crc kubenswrapper[4793]: I0127 21:54:22.733364 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6vsmf"] Jan 27 21:54:22 crc kubenswrapper[4793]: E0127 21:54:22.734801 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d704af2e-f278-46ed-b66c-f170725cd24f" containerName="extract-content" Jan 27 21:54:22 crc kubenswrapper[4793]: I0127 21:54:22.734822 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="d704af2e-f278-46ed-b66c-f170725cd24f" containerName="extract-content" Jan 27 21:54:22 crc kubenswrapper[4793]: E0127 21:54:22.734882 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d704af2e-f278-46ed-b66c-f170725cd24f" containerName="registry-server" Jan 27 21:54:22 crc kubenswrapper[4793]: I0127 21:54:22.734893 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="d704af2e-f278-46ed-b66c-f170725cd24f" containerName="registry-server" Jan 27 21:54:22 crc kubenswrapper[4793]: E0127 21:54:22.734944 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d704af2e-f278-46ed-b66c-f170725cd24f" containerName="extract-utilities" Jan 27 21:54:22 crc kubenswrapper[4793]: I0127 21:54:22.734955 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="d704af2e-f278-46ed-b66c-f170725cd24f" containerName="extract-utilities" Jan 27 21:54:22 crc kubenswrapper[4793]: I0127 21:54:22.735246 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="d704af2e-f278-46ed-b66c-f170725cd24f" containerName="registry-server" Jan 27 21:54:22 crc kubenswrapper[4793]: I0127 21:54:22.739601 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:22 crc kubenswrapper[4793]: I0127 21:54:22.753109 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:54:22 crc kubenswrapper[4793]: I0127 21:54:22.753471 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:54:22 crc kubenswrapper[4793]: I0127 21:54:22.758302 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6vsmf"] Jan 27 21:54:22 crc kubenswrapper[4793]: I0127 21:54:22.912292 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzdn4\" (UniqueName: \"kubernetes.io/projected/66fd6942-fe9f-44aa-9084-8803d5ce7677-kube-api-access-tzdn4\") pod \"community-operators-6vsmf\" (UID: \"66fd6942-fe9f-44aa-9084-8803d5ce7677\") " pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:22 crc kubenswrapper[4793]: I0127 21:54:22.912453 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66fd6942-fe9f-44aa-9084-8803d5ce7677-catalog-content\") pod \"community-operators-6vsmf\" (UID: \"66fd6942-fe9f-44aa-9084-8803d5ce7677\") " pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:22 crc kubenswrapper[4793]: I0127 21:54:22.912852 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66fd6942-fe9f-44aa-9084-8803d5ce7677-utilities\") pod \"community-operators-6vsmf\" (UID: \"66fd6942-fe9f-44aa-9084-8803d5ce7677\") " pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:23 crc kubenswrapper[4793]: I0127 21:54:23.015569 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66fd6942-fe9f-44aa-9084-8803d5ce7677-utilities\") pod \"community-operators-6vsmf\" (UID: \"66fd6942-fe9f-44aa-9084-8803d5ce7677\") " pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:23 crc kubenswrapper[4793]: I0127 21:54:23.015729 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzdn4\" (UniqueName: \"kubernetes.io/projected/66fd6942-fe9f-44aa-9084-8803d5ce7677-kube-api-access-tzdn4\") pod \"community-operators-6vsmf\" (UID: \"66fd6942-fe9f-44aa-9084-8803d5ce7677\") " pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:23 crc kubenswrapper[4793]: I0127 21:54:23.015763 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66fd6942-fe9f-44aa-9084-8803d5ce7677-catalog-content\") pod \"community-operators-6vsmf\" (UID: \"66fd6942-fe9f-44aa-9084-8803d5ce7677\") " pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:23 crc kubenswrapper[4793]: I0127 21:54:23.016100 4793 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66fd6942-fe9f-44aa-9084-8803d5ce7677-utilities\") pod \"community-operators-6vsmf\" (UID: \"66fd6942-fe9f-44aa-9084-8803d5ce7677\") " pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:23 crc kubenswrapper[4793]: I0127 21:54:23.016161 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66fd6942-fe9f-44aa-9084-8803d5ce7677-catalog-content\") pod \"community-operators-6vsmf\" (UID: \"66fd6942-fe9f-44aa-9084-8803d5ce7677\") " pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:23 crc kubenswrapper[4793]: I0127 21:54:23.039232 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzdn4\" (UniqueName: \"kubernetes.io/projected/66fd6942-fe9f-44aa-9084-8803d5ce7677-kube-api-access-tzdn4\") pod \"community-operators-6vsmf\" (UID: \"66fd6942-fe9f-44aa-9084-8803d5ce7677\") " pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:23 crc kubenswrapper[4793]: I0127 21:54:23.071994 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:23 crc kubenswrapper[4793]: I0127 21:54:23.685851 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6vsmf"] Jan 27 21:54:24 crc kubenswrapper[4793]: I0127 21:54:24.369636 4793 generic.go:334] "Generic (PLEG): container finished" podID="66fd6942-fe9f-44aa-9084-8803d5ce7677" containerID="12b251b21fce9ff3522c3203efea0e02626a1490ce44ed4ecda85db324bc24a8" exitCode=0 Jan 27 21:54:24 crc kubenswrapper[4793]: I0127 21:54:24.369712 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6vsmf" event={"ID":"66fd6942-fe9f-44aa-9084-8803d5ce7677","Type":"ContainerDied","Data":"12b251b21fce9ff3522c3203efea0e02626a1490ce44ed4ecda85db324bc24a8"} Jan 27 21:54:24 crc kubenswrapper[4793]: I0127 21:54:24.369883 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6vsmf" event={"ID":"66fd6942-fe9f-44aa-9084-8803d5ce7677","Type":"ContainerStarted","Data":"c878aeb2169f957f92a14247e49eeac37baa2e986cc4f24dec4684b5bc30adb5"} Jan 27 21:54:24 crc kubenswrapper[4793]: I0127 21:54:24.374289 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 21:54:26 crc kubenswrapper[4793]: I0127 21:54:26.394463 4793 generic.go:334] "Generic (PLEG): container finished" podID="66fd6942-fe9f-44aa-9084-8803d5ce7677" containerID="0ae2e7f9bf7c84de05effa354c98e87b51e108c6758bf633c4b676b69cd57620" exitCode=0 Jan 27 21:54:26 crc kubenswrapper[4793]: I0127 21:54:26.394647 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6vsmf" event={"ID":"66fd6942-fe9f-44aa-9084-8803d5ce7677","Type":"ContainerDied","Data":"0ae2e7f9bf7c84de05effa354c98e87b51e108c6758bf633c4b676b69cd57620"} Jan 27 21:54:27 crc kubenswrapper[4793]: I0127 21:54:27.502929 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6vsmf" event={"ID":"66fd6942-fe9f-44aa-9084-8803d5ce7677","Type":"ContainerStarted","Data":"0ea2736306625c8dab37e62b958270fb66faa47a550505c16f423035a056ce1f"} Jan 27 21:54:27 crc kubenswrapper[4793]: I0127 21:54:27.535164 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/community-operators-6vsmf" podStartSLOduration=3.112024462 podStartE2EDuration="5.535124949s" podCreationTimestamp="2026-01-27 21:54:22 +0000 UTC" firstStartedPulling="2026-01-27 21:54:24.373161227 +0000 UTC m=+6689.763414423" lastFinishedPulling="2026-01-27 21:54:26.796261724 +0000 UTC m=+6692.186514910" observedRunningTime="2026-01-27 21:54:27.521355174 +0000 UTC m=+6692.911608330" watchObservedRunningTime="2026-01-27 21:54:27.535124949 +0000 UTC m=+6692.925378135" Jan 27 21:54:27 crc kubenswrapper[4793]: I0127 21:54:27.804863 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:54:27 crc kubenswrapper[4793]: E0127 21:54:27.805510 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:54:33 crc kubenswrapper[4793]: I0127 21:54:33.072368 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:33 crc kubenswrapper[4793]: I0127 21:54:33.072825 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:33 crc kubenswrapper[4793]: I0127 21:54:33.135837 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:33 crc kubenswrapper[4793]: I0127 21:54:33.665235 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:33 crc kubenswrapper[4793]: I0127 21:54:33.730814 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6vsmf"] Jan 27 21:54:35 crc kubenswrapper[4793]: I0127 21:54:35.604842 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6vsmf" podUID="66fd6942-fe9f-44aa-9084-8803d5ce7677" containerName="registry-server" containerID="cri-o://0ea2736306625c8dab37e62b958270fb66faa47a550505c16f423035a056ce1f" gracePeriod=2 Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.364564 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.490015 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66fd6942-fe9f-44aa-9084-8803d5ce7677-utilities\") pod \"66fd6942-fe9f-44aa-9084-8803d5ce7677\" (UID: \"66fd6942-fe9f-44aa-9084-8803d5ce7677\") " Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.490098 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66fd6942-fe9f-44aa-9084-8803d5ce7677-catalog-content\") pod \"66fd6942-fe9f-44aa-9084-8803d5ce7677\" (UID: \"66fd6942-fe9f-44aa-9084-8803d5ce7677\") " Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.490192 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzdn4\" (UniqueName: \"kubernetes.io/projected/66fd6942-fe9f-44aa-9084-8803d5ce7677-kube-api-access-tzdn4\") pod \"66fd6942-fe9f-44aa-9084-8803d5ce7677\" (UID: \"66fd6942-fe9f-44aa-9084-8803d5ce7677\") " Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.490962 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66fd6942-fe9f-44aa-9084-8803d5ce7677-utilities" (OuterVolumeSpecName: "utilities") pod "66fd6942-fe9f-44aa-9084-8803d5ce7677" (UID: "66fd6942-fe9f-44aa-9084-8803d5ce7677"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.497170 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66fd6942-fe9f-44aa-9084-8803d5ce7677-kube-api-access-tzdn4" (OuterVolumeSpecName: "kube-api-access-tzdn4") pod "66fd6942-fe9f-44aa-9084-8803d5ce7677" (UID: "66fd6942-fe9f-44aa-9084-8803d5ce7677"). InnerVolumeSpecName "kube-api-access-tzdn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.547348 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66fd6942-fe9f-44aa-9084-8803d5ce7677-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "66fd6942-fe9f-44aa-9084-8803d5ce7677" (UID: "66fd6942-fe9f-44aa-9084-8803d5ce7677"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.592432 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzdn4\" (UniqueName: \"kubernetes.io/projected/66fd6942-fe9f-44aa-9084-8803d5ce7677-kube-api-access-tzdn4\") on node \"crc\" DevicePath \"\"" Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.592474 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66fd6942-fe9f-44aa-9084-8803d5ce7677-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.592485 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66fd6942-fe9f-44aa-9084-8803d5ce7677-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.615923 4793 generic.go:334] "Generic (PLEG): container finished" podID="66fd6942-fe9f-44aa-9084-8803d5ce7677" containerID="0ea2736306625c8dab37e62b958270fb66faa47a550505c16f423035a056ce1f" exitCode=0 Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.615982 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6vsmf" event={"ID":"66fd6942-fe9f-44aa-9084-8803d5ce7677","Type":"ContainerDied","Data":"0ea2736306625c8dab37e62b958270fb66faa47a550505c16f423035a056ce1f"} Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.616014 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6vsmf" Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.616034 4793 scope.go:117] "RemoveContainer" containerID="0ea2736306625c8dab37e62b958270fb66faa47a550505c16f423035a056ce1f" Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.616015 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6vsmf" event={"ID":"66fd6942-fe9f-44aa-9084-8803d5ce7677","Type":"ContainerDied","Data":"c878aeb2169f957f92a14247e49eeac37baa2e986cc4f24dec4684b5bc30adb5"} Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.642953 4793 scope.go:117] "RemoveContainer" containerID="0ae2e7f9bf7c84de05effa354c98e87b51e108c6758bf633c4b676b69cd57620" Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.681935 4793 scope.go:117] "RemoveContainer" containerID="12b251b21fce9ff3522c3203efea0e02626a1490ce44ed4ecda85db324bc24a8" Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.693175 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6vsmf"] Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.702353 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6vsmf"] Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.737682 4793 scope.go:117] "RemoveContainer" containerID="0ea2736306625c8dab37e62b958270fb66faa47a550505c16f423035a056ce1f" Jan 27 21:54:36 crc kubenswrapper[4793]: E0127 21:54:36.738250 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ea2736306625c8dab37e62b958270fb66faa47a550505c16f423035a056ce1f\": container with ID starting with 0ea2736306625c8dab37e62b958270fb66faa47a550505c16f423035a056ce1f not found: ID does not exist" containerID="0ea2736306625c8dab37e62b958270fb66faa47a550505c16f423035a056ce1f" Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.738288 
4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ea2736306625c8dab37e62b958270fb66faa47a550505c16f423035a056ce1f"} err="failed to get container status \"0ea2736306625c8dab37e62b958270fb66faa47a550505c16f423035a056ce1f\": rpc error: code = NotFound desc = could not find container \"0ea2736306625c8dab37e62b958270fb66faa47a550505c16f423035a056ce1f\": container with ID starting with 0ea2736306625c8dab37e62b958270fb66faa47a550505c16f423035a056ce1f not found: ID does not exist" Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.738309 4793 scope.go:117] "RemoveContainer" containerID="0ae2e7f9bf7c84de05effa354c98e87b51e108c6758bf633c4b676b69cd57620" Jan 27 21:54:36 crc kubenswrapper[4793]: E0127 21:54:36.738618 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ae2e7f9bf7c84de05effa354c98e87b51e108c6758bf633c4b676b69cd57620\": container with ID starting with 0ae2e7f9bf7c84de05effa354c98e87b51e108c6758bf633c4b676b69cd57620 not found: ID does not exist" containerID="0ae2e7f9bf7c84de05effa354c98e87b51e108c6758bf633c4b676b69cd57620" Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.738667 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ae2e7f9bf7c84de05effa354c98e87b51e108c6758bf633c4b676b69cd57620"} err="failed to get container status \"0ae2e7f9bf7c84de05effa354c98e87b51e108c6758bf633c4b676b69cd57620\": rpc error: code = NotFound desc = could not find container \"0ae2e7f9bf7c84de05effa354c98e87b51e108c6758bf633c4b676b69cd57620\": container with ID starting with 0ae2e7f9bf7c84de05effa354c98e87b51e108c6758bf633c4b676b69cd57620 not found: ID does not exist" Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.738822 4793 scope.go:117] "RemoveContainer" containerID="12b251b21fce9ff3522c3203efea0e02626a1490ce44ed4ecda85db324bc24a8" Jan 27 21:54:36 crc kubenswrapper[4793]: E0127 21:54:36.739169 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12b251b21fce9ff3522c3203efea0e02626a1490ce44ed4ecda85db324bc24a8\": container with ID starting with 12b251b21fce9ff3522c3203efea0e02626a1490ce44ed4ecda85db324bc24a8 not found: ID does not exist" containerID="12b251b21fce9ff3522c3203efea0e02626a1490ce44ed4ecda85db324bc24a8" Jan 27 21:54:36 crc kubenswrapper[4793]: I0127 21:54:36.739195 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12b251b21fce9ff3522c3203efea0e02626a1490ce44ed4ecda85db324bc24a8"} err="failed to get container status \"12b251b21fce9ff3522c3203efea0e02626a1490ce44ed4ecda85db324bc24a8\": rpc error: code = NotFound desc = could not find container \"12b251b21fce9ff3522c3203efea0e02626a1490ce44ed4ecda85db324bc24a8\": container with ID starting with 12b251b21fce9ff3522c3203efea0e02626a1490ce44ed4ecda85db324bc24a8 not found: ID does not exist" Jan 27 21:54:37 crc kubenswrapper[4793]: I0127 21:54:37.820998 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66fd6942-fe9f-44aa-9084-8803d5ce7677" path="/var/lib/kubelet/pods/66fd6942-fe9f-44aa-9084-8803d5ce7677/volumes" Jan 27 21:54:39 crc kubenswrapper[4793]: I0127 21:54:39.803758 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a" Jan 27 21:54:39 crc kubenswrapper[4793]: E0127 21:54:39.804446 4793 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:54:52 crc kubenswrapper[4793]: I0127 21:54:52.754407 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 21:54:52 crc kubenswrapper[4793]: I0127 21:54:52.754943 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 21:54:52 crc kubenswrapper[4793]: I0127 21:54:52.755005 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 21:54:52 crc kubenswrapper[4793]: I0127 21:54:52.755949 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 21:54:52 crc kubenswrapper[4793]: I0127 21:54:52.756021 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537" gracePeriod=600 Jan 27 21:54:52 crc kubenswrapper[4793]: E0127 21:54:52.892583 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:54:53 crc kubenswrapper[4793]: I0127 21:54:53.845673 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537" exitCode=0 Jan 27 21:54:53 crc kubenswrapper[4793]: I0127 21:54:53.845817 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"} Jan 27 21:54:53 crc kubenswrapper[4793]: I0127 21:54:53.846032 4793 scope.go:117] "RemoveContainer" containerID="5652ae133c279bea8059fc37b4a1b976334ddc61cf5c85d8b44e502e31a49059" Jan 27 21:54:53 crc kubenswrapper[4793]: I0127 21:54:53.848320 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537" Jan 27 21:54:53 crc 
Jan 27 21:54:53 crc kubenswrapper[4793]: E0127 21:54:53.848881 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:54:54 crc kubenswrapper[4793]: I0127 21:54:54.804715 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a"
Jan 27 21:54:54 crc kubenswrapper[4793]: E0127 21:54:54.805525 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:55:07 crc kubenswrapper[4793]: I0127 21:55:07.804437 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:55:07 crc kubenswrapper[4793]: E0127 21:55:07.805595 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:55:07 crc kubenswrapper[4793]: I0127 21:55:07.805901 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a"
Jan 27 21:55:09 crc kubenswrapper[4793]: I0127 21:55:09.067764 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"}
Jan 27 21:55:11 crc kubenswrapper[4793]: I0127 21:55:11.099131 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7" exitCode=1
Jan 27 21:55:11 crc kubenswrapper[4793]: I0127 21:55:11.099213 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"}
Jan 27 21:55:11 crc kubenswrapper[4793]: I0127 21:55:11.099658 4793 scope.go:117] "RemoveContainer" containerID="81bfedcdaa2389fc476349ba263f8a29bab23dc9f2c156f9aa540de2193e778a"
Jan 27 21:55:11 crc kubenswrapper[4793]: I0127 21:55:11.100899 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:55:11 crc kubenswrapper[4793]: E0127 21:55:11.101755 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:55:13 crc kubenswrapper[4793]: I0127 21:55:13.242502 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 27 21:55:13 crc kubenswrapper[4793]: I0127 21:55:13.244609 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:55:13 crc kubenswrapper[4793]: E0127 21:55:13.245069 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:55:18 crc kubenswrapper[4793]: I0127 21:55:18.242936 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 21:55:18 crc kubenswrapper[4793]: I0127 21:55:18.244000 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 21:55:18 crc kubenswrapper[4793]: I0127 21:55:18.244047 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 21:55:18 crc kubenswrapper[4793]: I0127 21:55:18.245173 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:55:18 crc kubenswrapper[4793]: E0127 21:55:18.245740 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:55:20 crc kubenswrapper[4793]: I0127 21:55:20.803854 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:55:20 crc kubenswrapper[4793]: E0127 21:55:20.805237 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:55:28 crc kubenswrapper[4793]: I0127 21:55:28.803801 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:55:28 crc kubenswrapper[4793]: E0127 21:55:28.805064 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:55:31 crc kubenswrapper[4793]: I0127 21:55:31.805393 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
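The recurring "back-off 5m0s" pairs show CrashLoopBackOff at its ceiling: each failed restart roughly doubles the wait, capped at five minutes. A sketch of that arithmetic, assuming the commonly documented kubelet defaults of a 10s initial delay and a doubling factor (assumed constants, not read from this cluster's configuration):

```go
package main

import (
	"fmt"
	"time"
)

// crashLoopDelays returns the restart-delay sequence for a crashing
// container under an exponential backoff with a hard cap.
func crashLoopDelays(restarts int) []time.Duration {
	const initial = 10 * time.Second // assumed kubelet default
	const maxDelay = 5 * time.Minute // the "back-off 5m0s" cap in the log
	delays := make([]time.Duration, 0, restarts)
	d := initial
	for i := 0; i < restarts; i++ {
		delays = append(delays, d)
		d *= 2
		if d > maxDelay {
			d = maxDelay
		}
	}
	return delays
}

func main() {
	// [10s 20s 40s 1m20s 2m40s 5m0s 5m0s 5m0s] — after roughly six crashes
	// every retry waits the full 5m0s reported in these records.
	fmt.Println(crashLoopDelays(8))
}
```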
Jan 27 21:55:31 crc kubenswrapper[4793]: E0127 21:55:31.806018 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:55:42 crc kubenswrapper[4793]: I0127 21:55:42.803451 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:55:42 crc kubenswrapper[4793]: E0127 21:55:42.805025 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:55:43 crc kubenswrapper[4793]: I0127 21:55:43.804600 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:55:43 crc kubenswrapper[4793]: E0127 21:55:43.805476 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:55:45 crc kubenswrapper[4793]: I0127 21:55:45.470755 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-prwsk"]
Jan 27 21:55:45 crc kubenswrapper[4793]: E0127 21:55:45.471526 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66fd6942-fe9f-44aa-9084-8803d5ce7677" containerName="extract-content"
Jan 27 21:55:45 crc kubenswrapper[4793]: I0127 21:55:45.471538 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="66fd6942-fe9f-44aa-9084-8803d5ce7677" containerName="extract-content"
Jan 27 21:55:45 crc kubenswrapper[4793]: E0127 21:55:45.471558 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66fd6942-fe9f-44aa-9084-8803d5ce7677" containerName="registry-server"
Jan 27 21:55:45 crc kubenswrapper[4793]: I0127 21:55:45.471564 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="66fd6942-fe9f-44aa-9084-8803d5ce7677" containerName="registry-server"
Jan 27 21:55:45 crc kubenswrapper[4793]: E0127 21:55:45.471587 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66fd6942-fe9f-44aa-9084-8803d5ce7677" containerName="extract-utilities"
Jan 27 21:55:45 crc kubenswrapper[4793]: I0127 21:55:45.471593 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="66fd6942-fe9f-44aa-9084-8803d5ce7677" containerName="extract-utilities"
Jan 27 21:55:45 crc kubenswrapper[4793]: I0127 21:55:45.471792 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="66fd6942-fe9f-44aa-9084-8803d5ce7677" containerName="registry-server"
Need to start a new one" pod="openshift-marketplace/redhat-operators-prwsk" Jan 27 21:55:45 crc kubenswrapper[4793]: I0127 21:55:45.490475 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-prwsk"] Jan 27 21:55:45 crc kubenswrapper[4793]: I0127 21:55:45.651752 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edcca022-f5e8-4c86-b487-2b2f9edcac82-catalog-content\") pod \"redhat-operators-prwsk\" (UID: \"edcca022-f5e8-4c86-b487-2b2f9edcac82\") " pod="openshift-marketplace/redhat-operators-prwsk" Jan 27 21:55:45 crc kubenswrapper[4793]: I0127 21:55:45.651792 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edcca022-f5e8-4c86-b487-2b2f9edcac82-utilities\") pod \"redhat-operators-prwsk\" (UID: \"edcca022-f5e8-4c86-b487-2b2f9edcac82\") " pod="openshift-marketplace/redhat-operators-prwsk" Jan 27 21:55:45 crc kubenswrapper[4793]: I0127 21:55:45.652100 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8xv8\" (UniqueName: \"kubernetes.io/projected/edcca022-f5e8-4c86-b487-2b2f9edcac82-kube-api-access-d8xv8\") pod \"redhat-operators-prwsk\" (UID: \"edcca022-f5e8-4c86-b487-2b2f9edcac82\") " pod="openshift-marketplace/redhat-operators-prwsk" Jan 27 21:55:45 crc kubenswrapper[4793]: I0127 21:55:45.754423 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8xv8\" (UniqueName: \"kubernetes.io/projected/edcca022-f5e8-4c86-b487-2b2f9edcac82-kube-api-access-d8xv8\") pod \"redhat-operators-prwsk\" (UID: \"edcca022-f5e8-4c86-b487-2b2f9edcac82\") " pod="openshift-marketplace/redhat-operators-prwsk" Jan 27 21:55:45 crc kubenswrapper[4793]: I0127 21:55:45.754618 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edcca022-f5e8-4c86-b487-2b2f9edcac82-catalog-content\") pod \"redhat-operators-prwsk\" (UID: \"edcca022-f5e8-4c86-b487-2b2f9edcac82\") " pod="openshift-marketplace/redhat-operators-prwsk" Jan 27 21:55:45 crc kubenswrapper[4793]: I0127 21:55:45.754638 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edcca022-f5e8-4c86-b487-2b2f9edcac82-utilities\") pod \"redhat-operators-prwsk\" (UID: \"edcca022-f5e8-4c86-b487-2b2f9edcac82\") " pod="openshift-marketplace/redhat-operators-prwsk" Jan 27 21:55:45 crc kubenswrapper[4793]: I0127 21:55:45.755212 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edcca022-f5e8-4c86-b487-2b2f9edcac82-catalog-content\") pod \"redhat-operators-prwsk\" (UID: \"edcca022-f5e8-4c86-b487-2b2f9edcac82\") " pod="openshift-marketplace/redhat-operators-prwsk" Jan 27 21:55:45 crc kubenswrapper[4793]: I0127 21:55:45.755240 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edcca022-f5e8-4c86-b487-2b2f9edcac82-utilities\") pod \"redhat-operators-prwsk\" (UID: \"edcca022-f5e8-4c86-b487-2b2f9edcac82\") " pod="openshift-marketplace/redhat-operators-prwsk" Jan 27 21:55:45 crc kubenswrapper[4793]: I0127 21:55:45.776435 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-d8xv8\" (UniqueName: \"kubernetes.io/projected/edcca022-f5e8-4c86-b487-2b2f9edcac82-kube-api-access-d8xv8\") pod \"redhat-operators-prwsk\" (UID: \"edcca022-f5e8-4c86-b487-2b2f9edcac82\") " pod="openshift-marketplace/redhat-operators-prwsk" Jan 27 21:55:45 crc kubenswrapper[4793]: I0127 21:55:45.789531 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-prwsk" Jan 27 21:55:46 crc kubenswrapper[4793]: I0127 21:55:46.417878 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-prwsk"] Jan 27 21:55:46 crc kubenswrapper[4793]: I0127 21:55:46.574071 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-prwsk" event={"ID":"edcca022-f5e8-4c86-b487-2b2f9edcac82","Type":"ContainerStarted","Data":"e5b7689e11dbe1b12afbedc6252aa3f7ea5445aacb6777761dc902f903faaea6"} Jan 27 21:55:47 crc kubenswrapper[4793]: I0127 21:55:47.584472 4793 generic.go:334] "Generic (PLEG): container finished" podID="edcca022-f5e8-4c86-b487-2b2f9edcac82" containerID="e061905a928289a06001d090ba86b1ee817df752628b271dee3088c740a0e47d" exitCode=0 Jan 27 21:55:47 crc kubenswrapper[4793]: I0127 21:55:47.584623 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-prwsk" event={"ID":"edcca022-f5e8-4c86-b487-2b2f9edcac82","Type":"ContainerDied","Data":"e061905a928289a06001d090ba86b1ee817df752628b271dee3088c740a0e47d"} Jan 27 21:55:48 crc kubenswrapper[4793]: I0127 21:55:48.600510 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-prwsk" event={"ID":"edcca022-f5e8-4c86-b487-2b2f9edcac82","Type":"ContainerStarted","Data":"06cd2e4cf8a6e0f99c6b09e97bc4c8aba7d907861ffa3a1f16313d3ef7a7f39e"} Jan 27 21:55:52 crc kubenswrapper[4793]: I0127 21:55:52.647495 4793 generic.go:334] "Generic (PLEG): container finished" podID="edcca022-f5e8-4c86-b487-2b2f9edcac82" containerID="06cd2e4cf8a6e0f99c6b09e97bc4c8aba7d907861ffa3a1f16313d3ef7a7f39e" exitCode=0 Jan 27 21:55:52 crc kubenswrapper[4793]: I0127 21:55:52.647840 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-prwsk" event={"ID":"edcca022-f5e8-4c86-b487-2b2f9edcac82","Type":"ContainerDied","Data":"06cd2e4cf8a6e0f99c6b09e97bc4c8aba7d907861ffa3a1f16313d3ef7a7f39e"} Jan 27 21:55:53 crc kubenswrapper[4793]: I0127 21:55:53.670901 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-prwsk" event={"ID":"edcca022-f5e8-4c86-b487-2b2f9edcac82","Type":"ContainerStarted","Data":"0feff10db92c3db3d641d90c3cc6298852514ef0ffc1e397295dab54dd93c2cf"} Jan 27 21:55:53 crc kubenswrapper[4793]: I0127 21:55:53.692614 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-prwsk" podStartSLOduration=3.014147954 podStartE2EDuration="8.692592555s" podCreationTimestamp="2026-01-27 21:55:45 +0000 UTC" firstStartedPulling="2026-01-27 21:55:47.586637573 +0000 UTC m=+6772.976890729" lastFinishedPulling="2026-01-27 21:55:53.265082164 +0000 UTC m=+6778.655335330" observedRunningTime="2026-01-27 21:55:53.688199678 +0000 UTC m=+6779.078452854" watchObservedRunningTime="2026-01-27 21:55:53.692592555 +0000 UTC m=+6779.082845721" Jan 27 21:55:54 crc kubenswrapper[4793]: I0127 21:55:54.803437 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537" Jan 27 
Jan 27 21:55:54 crc kubenswrapper[4793]: E0127 21:55:54.803917 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:55:55 crc kubenswrapper[4793]: I0127 21:55:55.790112 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-prwsk"
Jan 27 21:55:55 crc kubenswrapper[4793]: I0127 21:55:55.790395 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-prwsk"
Jan 27 21:55:56 crc kubenswrapper[4793]: I0127 21:55:56.804396 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:55:56 crc kubenswrapper[4793]: E0127 21:55:56.805130 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:55:56 crc kubenswrapper[4793]: I0127 21:55:56.863541 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-prwsk" podUID="edcca022-f5e8-4c86-b487-2b2f9edcac82" containerName="registry-server" probeResult="failure" output=<
Jan 27 21:55:56 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s
Jan 27 21:55:56 crc kubenswrapper[4793]: >
Jan 27 21:56:05 crc kubenswrapper[4793]: I0127 21:56:05.812611 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:56:05 crc kubenswrapper[4793]: E0127 21:56:05.813674 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:56:05 crc kubenswrapper[4793]: I0127 21:56:05.874845 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-prwsk"
Jan 27 21:56:05 crc kubenswrapper[4793]: I0127 21:56:05.965236 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-prwsk"
Jan 27 21:56:06 crc kubenswrapper[4793]: I0127 21:56:06.130353 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-prwsk"]
Jan 27 21:56:07 crc kubenswrapper[4793]: I0127 21:56:07.846086 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-prwsk" podUID="edcca022-f5e8-4c86-b487-2b2f9edcac82" containerName="registry-server" containerID="cri-o://0feff10db92c3db3d641d90c3cc6298852514ef0ffc1e397295dab54dd93c2cf" gracePeriod=2
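The startup-probe failure above (timeout: failed to connect service ":50051" within 1s) is a gRPC health check against the registry-server port. A minimal client performing the same kind of check, assuming the server implements the standard grpc.health.v1 service; illustrative, not the actual probe binary used here:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Match the probe's 1s budget: dialing or checking past this fails.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "127.0.0.1:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock()) // block so a dead server fails within the budget
	if err != nil {
		fmt.Println("probe failure:", err) // e.g. context deadline exceeded
		return
	}
	defer conn.Close()

	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	if err != nil {
		fmt.Println("probe failure:", err)
		return
	}
	fmt.Println("serving status:", resp.GetStatus()) // SERVING once ready
}
```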
pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-prwsk" Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.564745 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edcca022-f5e8-4c86-b487-2b2f9edcac82-catalog-content\") pod \"edcca022-f5e8-4c86-b487-2b2f9edcac82\" (UID: \"edcca022-f5e8-4c86-b487-2b2f9edcac82\") " Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.564839 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edcca022-f5e8-4c86-b487-2b2f9edcac82-utilities\") pod \"edcca022-f5e8-4c86-b487-2b2f9edcac82\" (UID: \"edcca022-f5e8-4c86-b487-2b2f9edcac82\") " Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.565069 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d8xv8\" (UniqueName: \"kubernetes.io/projected/edcca022-f5e8-4c86-b487-2b2f9edcac82-kube-api-access-d8xv8\") pod \"edcca022-f5e8-4c86-b487-2b2f9edcac82\" (UID: \"edcca022-f5e8-4c86-b487-2b2f9edcac82\") " Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.565823 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/edcca022-f5e8-4c86-b487-2b2f9edcac82-utilities" (OuterVolumeSpecName: "utilities") pod "edcca022-f5e8-4c86-b487-2b2f9edcac82" (UID: "edcca022-f5e8-4c86-b487-2b2f9edcac82"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.573420 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edcca022-f5e8-4c86-b487-2b2f9edcac82-kube-api-access-d8xv8" (OuterVolumeSpecName: "kube-api-access-d8xv8") pod "edcca022-f5e8-4c86-b487-2b2f9edcac82" (UID: "edcca022-f5e8-4c86-b487-2b2f9edcac82"). InnerVolumeSpecName "kube-api-access-d8xv8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.667657 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d8xv8\" (UniqueName: \"kubernetes.io/projected/edcca022-f5e8-4c86-b487-2b2f9edcac82-kube-api-access-d8xv8\") on node \"crc\" DevicePath \"\"" Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.667948 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edcca022-f5e8-4c86-b487-2b2f9edcac82-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.690894 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/edcca022-f5e8-4c86-b487-2b2f9edcac82-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "edcca022-f5e8-4c86-b487-2b2f9edcac82" (UID: "edcca022-f5e8-4c86-b487-2b2f9edcac82"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.769956 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edcca022-f5e8-4c86-b487-2b2f9edcac82-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.857530 4793 generic.go:334] "Generic (PLEG): container finished" podID="edcca022-f5e8-4c86-b487-2b2f9edcac82" containerID="0feff10db92c3db3d641d90c3cc6298852514ef0ffc1e397295dab54dd93c2cf" exitCode=0 Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.857623 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-prwsk" Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.857595 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-prwsk" event={"ID":"edcca022-f5e8-4c86-b487-2b2f9edcac82","Type":"ContainerDied","Data":"0feff10db92c3db3d641d90c3cc6298852514ef0ffc1e397295dab54dd93c2cf"} Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.857696 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-prwsk" event={"ID":"edcca022-f5e8-4c86-b487-2b2f9edcac82","Type":"ContainerDied","Data":"e5b7689e11dbe1b12afbedc6252aa3f7ea5445aacb6777761dc902f903faaea6"} Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.857734 4793 scope.go:117] "RemoveContainer" containerID="0feff10db92c3db3d641d90c3cc6298852514ef0ffc1e397295dab54dd93c2cf" Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.881289 4793 scope.go:117] "RemoveContainer" containerID="06cd2e4cf8a6e0f99c6b09e97bc4c8aba7d907861ffa3a1f16313d3ef7a7f39e" Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.900626 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-prwsk"] Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.911789 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-prwsk"] Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.923371 4793 scope.go:117] "RemoveContainer" containerID="e061905a928289a06001d090ba86b1ee817df752628b271dee3088c740a0e47d" Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.962925 4793 scope.go:117] "RemoveContainer" containerID="0feff10db92c3db3d641d90c3cc6298852514ef0ffc1e397295dab54dd93c2cf" Jan 27 21:56:08 crc kubenswrapper[4793]: E0127 21:56:08.963420 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0feff10db92c3db3d641d90c3cc6298852514ef0ffc1e397295dab54dd93c2cf\": container with ID starting with 0feff10db92c3db3d641d90c3cc6298852514ef0ffc1e397295dab54dd93c2cf not found: ID does not exist" containerID="0feff10db92c3db3d641d90c3cc6298852514ef0ffc1e397295dab54dd93c2cf" Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.963471 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0feff10db92c3db3d641d90c3cc6298852514ef0ffc1e397295dab54dd93c2cf"} err="failed to get container status \"0feff10db92c3db3d641d90c3cc6298852514ef0ffc1e397295dab54dd93c2cf\": rpc error: code = NotFound desc = could not find container \"0feff10db92c3db3d641d90c3cc6298852514ef0ffc1e397295dab54dd93c2cf\": container with ID starting with 0feff10db92c3db3d641d90c3cc6298852514ef0ffc1e397295dab54dd93c2cf not found: ID does not exist" Jan 27 21:56:08 crc 
Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.963507 4793 scope.go:117] "RemoveContainer" containerID="06cd2e4cf8a6e0f99c6b09e97bc4c8aba7d907861ffa3a1f16313d3ef7a7f39e"
Jan 27 21:56:08 crc kubenswrapper[4793]: E0127 21:56:08.963999 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06cd2e4cf8a6e0f99c6b09e97bc4c8aba7d907861ffa3a1f16313d3ef7a7f39e\": container with ID starting with 06cd2e4cf8a6e0f99c6b09e97bc4c8aba7d907861ffa3a1f16313d3ef7a7f39e not found: ID does not exist" containerID="06cd2e4cf8a6e0f99c6b09e97bc4c8aba7d907861ffa3a1f16313d3ef7a7f39e"
Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.964048 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06cd2e4cf8a6e0f99c6b09e97bc4c8aba7d907861ffa3a1f16313d3ef7a7f39e"} err="failed to get container status \"06cd2e4cf8a6e0f99c6b09e97bc4c8aba7d907861ffa3a1f16313d3ef7a7f39e\": rpc error: code = NotFound desc = could not find container \"06cd2e4cf8a6e0f99c6b09e97bc4c8aba7d907861ffa3a1f16313d3ef7a7f39e\": container with ID starting with 06cd2e4cf8a6e0f99c6b09e97bc4c8aba7d907861ffa3a1f16313d3ef7a7f39e not found: ID does not exist"
Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.964086 4793 scope.go:117] "RemoveContainer" containerID="e061905a928289a06001d090ba86b1ee817df752628b271dee3088c740a0e47d"
Jan 27 21:56:08 crc kubenswrapper[4793]: E0127 21:56:08.964645 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e061905a928289a06001d090ba86b1ee817df752628b271dee3088c740a0e47d\": container with ID starting with e061905a928289a06001d090ba86b1ee817df752628b271dee3088c740a0e47d not found: ID does not exist" containerID="e061905a928289a06001d090ba86b1ee817df752628b271dee3088c740a0e47d"
Jan 27 21:56:08 crc kubenswrapper[4793]: I0127 21:56:08.964723 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e061905a928289a06001d090ba86b1ee817df752628b271dee3088c740a0e47d"} err="failed to get container status \"e061905a928289a06001d090ba86b1ee817df752628b271dee3088c740a0e47d\": rpc error: code = NotFound desc = could not find container \"e061905a928289a06001d090ba86b1ee817df752628b271dee3088c740a0e47d\": container with ID starting with e061905a928289a06001d090ba86b1ee817df752628b271dee3088c740a0e47d not found: ID does not exist"
Jan 27 21:56:09 crc kubenswrapper[4793]: I0127 21:56:09.821044 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edcca022-f5e8-4c86-b487-2b2f9edcac82" path="/var/lib/kubelet/pods/edcca022-f5e8-4c86-b487-2b2f9edcac82/volumes"
Jan 27 21:56:11 crc kubenswrapper[4793]: I0127 21:56:11.803950 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:56:11 crc kubenswrapper[4793]: E0127 21:56:11.804945 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:56:17 crc kubenswrapper[4793]: I0127 21:56:17.803280 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
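The "Cleaned up orphaned pod volumes dir" records expose the on-disk layout kubelet uses for pod volumes. A sketch that composes those paths from the UIDs and volume names seen above; the plugin-name escaping is inferred from the UniqueName fields in these records, not taken from kubelet source:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// volumeDir builds /var/lib/kubelet/pods/<podUID>/volumes/<escaped plugin>/<name>,
// where the plugin's "/" is replaced with "~" (kubernetes.io/empty-dir ->
// kubernetes.io~empty-dir), matching the directories being torn down above.
func volumeDir(podUID, plugin, volume string) string {
	escaped := strings.ReplaceAll(plugin, "/", "~")
	return filepath.Join("/var/lib/kubelet/pods", podUID, "volumes", escaped, volume)
}

func main() {
	uid := "edcca022-f5e8-4c86-b487-2b2f9edcac82" // the pod cleaned up in the log
	fmt.Println(volumeDir(uid, "kubernetes.io/empty-dir", "catalog-content"))
	fmt.Println(volumeDir(uid, "kubernetes.io/projected", "kube-api-access-d8xv8"))
}
```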
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:56:26 crc kubenswrapper[4793]: I0127 21:56:26.803482 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7" Jan 27 21:56:26 crc kubenswrapper[4793]: E0127 21:56:26.804307 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:56:30 crc kubenswrapper[4793]: I0127 21:56:30.804764 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537" Jan 27 21:56:30 crc kubenswrapper[4793]: E0127 21:56:30.805968 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:56:37 crc kubenswrapper[4793]: I0127 21:56:37.803970 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7" Jan 27 21:56:37 crc kubenswrapper[4793]: E0127 21:56:37.805508 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:56:43 crc kubenswrapper[4793]: I0127 21:56:43.804040 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537" Jan 27 21:56:43 crc kubenswrapper[4793]: E0127 21:56:43.805719 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 21:56:51 crc kubenswrapper[4793]: I0127 21:56:51.804885 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7" Jan 27 21:56:51 crc kubenswrapper[4793]: E0127 21:56:51.805934 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 
Jan 27 21:56:55 crc kubenswrapper[4793]: I0127 21:56:55.824483 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:56:55 crc kubenswrapper[4793]: E0127 21:56:55.825302 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:57:05 crc kubenswrapper[4793]: I0127 21:57:05.820042 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:57:05 crc kubenswrapper[4793]: E0127 21:57:05.821194 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:57:10 crc kubenswrapper[4793]: I0127 21:57:10.803046 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:57:10 crc kubenswrapper[4793]: E0127 21:57:10.803790 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:57:18 crc kubenswrapper[4793]: I0127 21:57:18.802517 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:57:18 crc kubenswrapper[4793]: E0127 21:57:18.803278 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:57:23 crc kubenswrapper[4793]: I0127 21:57:23.804464 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:57:23 crc kubenswrapper[4793]: E0127 21:57:23.805708 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:57:32 crc kubenswrapper[4793]: I0127 21:57:32.804271 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:57:32 crc kubenswrapper[4793]: E0127 21:57:32.805217 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:57:37 crc kubenswrapper[4793]: I0127 21:57:37.816570 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:57:37 crc kubenswrapper[4793]: E0127 21:57:37.817799 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:57:45 crc kubenswrapper[4793]: I0127 21:57:45.815666 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:57:45 crc kubenswrapper[4793]: E0127 21:57:45.816971 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:57:52 crc kubenswrapper[4793]: I0127 21:57:52.803205 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:57:52 crc kubenswrapper[4793]: E0127 21:57:52.805114 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:58:00 crc kubenswrapper[4793]: I0127 21:58:00.802728 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:58:00 crc kubenswrapper[4793]: E0127 21:58:00.803433 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:58:04 crc kubenswrapper[4793]: I0127 21:58:04.804154 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:58:04 crc kubenswrapper[4793]: E0127 21:58:04.805284 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:58:13 crc kubenswrapper[4793]: I0127 21:58:13.803123 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:58:13 crc kubenswrapper[4793]: E0127 21:58:13.803899 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:58:15 crc kubenswrapper[4793]: I0127 21:58:15.815458 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:58:15 crc kubenswrapper[4793]: E0127 21:58:15.815995 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:58:27 crc kubenswrapper[4793]: I0127 21:58:27.804154 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:58:27 crc kubenswrapper[4793]: E0127 21:58:27.805081 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:58:28 crc kubenswrapper[4793]: I0127 21:58:28.803922 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:58:28 crc kubenswrapper[4793]: E0127 21:58:28.804312 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:58:38 crc kubenswrapper[4793]: I0127 21:58:38.804757 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:58:38 crc kubenswrapper[4793]: E0127 21:58:38.805723 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:58:43 crc kubenswrapper[4793]: I0127 21:58:43.804160 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:58:43 crc kubenswrapper[4793]: E0127 21:58:43.804731 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:58:49 crc kubenswrapper[4793]: I0127 21:58:49.804791 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:58:49 crc kubenswrapper[4793]: E0127 21:58:49.805763 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:58:56 crc kubenswrapper[4793]: I0127 21:58:56.803402 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:58:56 crc kubenswrapper[4793]: E0127 21:58:56.804191 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:59:01 crc kubenswrapper[4793]: I0127 21:59:01.803885 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:59:01 crc kubenswrapper[4793]: E0127 21:59:01.804970 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:59:10 crc kubenswrapper[4793]: I0127 21:59:10.803190 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:59:10 crc kubenswrapper[4793]: E0127 21:59:10.804144 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:59:15 crc kubenswrapper[4793]: I0127 21:59:15.811854 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:59:15 crc kubenswrapper[4793]: E0127 21:59:15.813438 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:59:25 crc kubenswrapper[4793]: I0127 21:59:25.811437 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:59:25 crc kubenswrapper[4793]: E0127 21:59:25.812444 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:59:29 crc kubenswrapper[4793]: I0127 21:59:29.804063 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:59:29 crc kubenswrapper[4793]: E0127 21:59:29.805163 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:59:37 crc kubenswrapper[4793]: I0127 21:59:37.804611 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537"
Jan 27 21:59:37 crc kubenswrapper[4793]: E0127 21:59:37.805806 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 21:59:42 crc kubenswrapper[4793]: I0127 21:59:42.803626 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7"
Jan 27 21:59:42 crc kubenswrapper[4793]: E0127 21:59:42.804253 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.395678 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-mr2kv"]
Jan 27 21:59:45 crc kubenswrapper[4793]: E0127 21:59:45.397393 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edcca022-f5e8-4c86-b487-2b2f9edcac82" containerName="registry-server"
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.397425 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="edcca022-f5e8-4c86-b487-2b2f9edcac82" containerName="registry-server"
Jan 27 21:59:45 crc kubenswrapper[4793]: E0127 21:59:45.397480 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edcca022-f5e8-4c86-b487-2b2f9edcac82" containerName="extract-content"
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.397497 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="edcca022-f5e8-4c86-b487-2b2f9edcac82" containerName="extract-content"
Jan 27 21:59:45 crc kubenswrapper[4793]: E0127 21:59:45.397604 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edcca022-f5e8-4c86-b487-2b2f9edcac82" containerName="extract-utilities"
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.397633 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="edcca022-f5e8-4c86-b487-2b2f9edcac82" containerName="extract-utilities"
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.398135 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="edcca022-f5e8-4c86-b487-2b2f9edcac82" containerName="registry-server"
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.401092 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mr2kv"
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.420299 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mr2kv"]
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.486885 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92f8599e-77ad-4227-af37-2e820b19d098-catalog-content\") pod \"certified-operators-mr2kv\" (UID: \"92f8599e-77ad-4227-af37-2e820b19d098\") " pod="openshift-marketplace/certified-operators-mr2kv"
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.486930 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92f8599e-77ad-4227-af37-2e820b19d098-utilities\") pod \"certified-operators-mr2kv\" (UID: \"92f8599e-77ad-4227-af37-2e820b19d098\") " pod="openshift-marketplace/certified-operators-mr2kv"
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.487114 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfj4m\" (UniqueName: \"kubernetes.io/projected/92f8599e-77ad-4227-af37-2e820b19d098-kube-api-access-qfj4m\") pod \"certified-operators-mr2kv\" (UID: \"92f8599e-77ad-4227-af37-2e820b19d098\") " pod="openshift-marketplace/certified-operators-mr2kv"
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.591026 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfj4m\" (UniqueName: \"kubernetes.io/projected/92f8599e-77ad-4227-af37-2e820b19d098-kube-api-access-qfj4m\") pod \"certified-operators-mr2kv\" (UID: \"92f8599e-77ad-4227-af37-2e820b19d098\") " pod="openshift-marketplace/certified-operators-mr2kv"
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.591573 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92f8599e-77ad-4227-af37-2e820b19d098-catalog-content\") pod \"certified-operators-mr2kv\" (UID: \"92f8599e-77ad-4227-af37-2e820b19d098\") " pod="openshift-marketplace/certified-operators-mr2kv"
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.591626 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92f8599e-77ad-4227-af37-2e820b19d098-utilities\") pod \"certified-operators-mr2kv\" (UID: \"92f8599e-77ad-4227-af37-2e820b19d098\") " pod="openshift-marketplace/certified-operators-mr2kv"
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.592418 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92f8599e-77ad-4227-af37-2e820b19d098-utilities\") pod \"certified-operators-mr2kv\" (UID: \"92f8599e-77ad-4227-af37-2e820b19d098\") " pod="openshift-marketplace/certified-operators-mr2kv"
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.592456 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92f8599e-77ad-4227-af37-2e820b19d098-catalog-content\") pod \"certified-operators-mr2kv\" (UID: \"92f8599e-77ad-4227-af37-2e820b19d098\") " pod="openshift-marketplace/certified-operators-mr2kv"
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.615538 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfj4m\" (UniqueName: \"kubernetes.io/projected/92f8599e-77ad-4227-af37-2e820b19d098-kube-api-access-qfj4m\") pod \"certified-operators-mr2kv\" (UID: \"92f8599e-77ad-4227-af37-2e820b19d098\") " pod="openshift-marketplace/certified-operators-mr2kv"
Jan 27 21:59:45 crc kubenswrapper[4793]: I0127 21:59:45.746571 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mr2kv"
Jan 27 21:59:46 crc kubenswrapper[4793]: W0127 21:59:46.294873 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92f8599e_77ad_4227_af37_2e820b19d098.slice/crio-f03ac944125bdfdbe452d0dba78ec210f5d38c4aee0545097b9f4d7781600ae3 WatchSource:0}: Error finding container f03ac944125bdfdbe452d0dba78ec210f5d38c4aee0545097b9f4d7781600ae3: Status 404 returned error can't find the container with id f03ac944125bdfdbe452d0dba78ec210f5d38c4aee0545097b9f4d7781600ae3
Jan 27 21:59:46 crc kubenswrapper[4793]: I0127 21:59:46.301148 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mr2kv"]
Jan 27 21:59:46 crc kubenswrapper[4793]: I0127 21:59:46.941863 4793 generic.go:334] "Generic (PLEG): container finished" podID="92f8599e-77ad-4227-af37-2e820b19d098" containerID="345822ad347977d6a56cdd12030c2d071a0782b04932faefc5d5ba11702450e5" exitCode=0
Jan 27 21:59:46 crc kubenswrapper[4793]: I0127 21:59:46.941936 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mr2kv" event={"ID":"92f8599e-77ad-4227-af37-2e820b19d098","Type":"ContainerDied","Data":"345822ad347977d6a56cdd12030c2d071a0782b04932faefc5d5ba11702450e5"}
Jan 27 21:59:46 crc kubenswrapper[4793]: I0127 21:59:46.941981 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mr2kv" event={"ID":"92f8599e-77ad-4227-af37-2e820b19d098","Type":"ContainerStarted","Data":"f03ac944125bdfdbe452d0dba78ec210f5d38c4aee0545097b9f4d7781600ae3"}
Jan 27 21:59:46 crc kubenswrapper[4793]: I0127 21:59:46.945094 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 27 21:59:47 crc kubenswrapper[4793]: I0127 21:59:47.957309 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mr2kv" event={"ID":"92f8599e-77ad-4227-af37-2e820b19d098","Type":"ContainerStarted","Data":"d90f1e1e200cf2e2ae34b8907117b5d4f383327079e9b28d27403e4efa6251a4"}
Jan 27 21:59:48 crc kubenswrapper[4793]: I0127 21:59:48.971838 4793 generic.go:334] "Generic (PLEG): container finished" podID="92f8599e-77ad-4227-af37-2e820b19d098" containerID="d90f1e1e200cf2e2ae34b8907117b5d4f383327079e9b28d27403e4efa6251a4" exitCode=0
event={"ID":"92f8599e-77ad-4227-af37-2e820b19d098","Type":"ContainerDied","Data":"d90f1e1e200cf2e2ae34b8907117b5d4f383327079e9b28d27403e4efa6251a4"} Jan 27 21:59:49 crc kubenswrapper[4793]: I0127 21:59:49.987226 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mr2kv" event={"ID":"92f8599e-77ad-4227-af37-2e820b19d098","Type":"ContainerStarted","Data":"e6206ce7faa87d32b15e90fbc73ffcc6016d147737e9adf9133ead482f899d85"} Jan 27 21:59:50 crc kubenswrapper[4793]: I0127 21:59:50.021838 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-mr2kv" podStartSLOduration=2.566955134 podStartE2EDuration="5.021808637s" podCreationTimestamp="2026-01-27 21:59:45 +0000 UTC" firstStartedPulling="2026-01-27 21:59:46.94443012 +0000 UTC m=+7012.334683316" lastFinishedPulling="2026-01-27 21:59:49.399283623 +0000 UTC m=+7014.789536819" observedRunningTime="2026-01-27 21:59:50.008899105 +0000 UTC m=+7015.399152271" watchObservedRunningTime="2026-01-27 21:59:50.021808637 +0000 UTC m=+7015.412061803" Jan 27 21:59:52 crc kubenswrapper[4793]: I0127 21:59:52.803398 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537" Jan 27 21:59:54 crc kubenswrapper[4793]: I0127 21:59:54.031691 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"92e6abc5aed66b383ba3361f29dbe5a66e4e4938ead003c7db11bf8cd34cc5a4"} Jan 27 21:59:55 crc kubenswrapper[4793]: I0127 21:59:55.854580 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7" Jan 27 21:59:55 crc kubenswrapper[4793]: E0127 21:59:55.855366 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 21:59:55 crc kubenswrapper[4793]: I0127 21:59:55.859916 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-mr2kv" Jan 27 21:59:55 crc kubenswrapper[4793]: I0127 21:59:55.859954 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-mr2kv" Jan 27 21:59:55 crc kubenswrapper[4793]: I0127 21:59:55.891069 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-mr2kv" Jan 27 21:59:56 crc kubenswrapper[4793]: I0127 21:59:56.127885 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-mr2kv" Jan 27 21:59:56 crc kubenswrapper[4793]: I0127 21:59:56.201108 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mr2kv"] Jan 27 21:59:58 crc kubenswrapper[4793]: I0127 21:59:58.094290 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-mr2kv" podUID="92f8599e-77ad-4227-af37-2e820b19d098" containerName="registry-server" containerID="cri-o://e6206ce7faa87d32b15e90fbc73ffcc6016d147737e9adf9133ead482f899d85" gracePeriod=2 Jan 27 21:59:58 crc 
kubenswrapper[4793]: I0127 21:59:58.716676 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mr2kv" Jan 27 21:59:58 crc kubenswrapper[4793]: I0127 21:59:58.818639 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92f8599e-77ad-4227-af37-2e820b19d098-utilities\") pod \"92f8599e-77ad-4227-af37-2e820b19d098\" (UID: \"92f8599e-77ad-4227-af37-2e820b19d098\") " Jan 27 21:59:58 crc kubenswrapper[4793]: I0127 21:59:58.818698 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92f8599e-77ad-4227-af37-2e820b19d098-catalog-content\") pod \"92f8599e-77ad-4227-af37-2e820b19d098\" (UID: \"92f8599e-77ad-4227-af37-2e820b19d098\") " Jan 27 21:59:58 crc kubenswrapper[4793]: I0127 21:59:58.818936 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfj4m\" (UniqueName: \"kubernetes.io/projected/92f8599e-77ad-4227-af37-2e820b19d098-kube-api-access-qfj4m\") pod \"92f8599e-77ad-4227-af37-2e820b19d098\" (UID: \"92f8599e-77ad-4227-af37-2e820b19d098\") " Jan 27 21:59:58 crc kubenswrapper[4793]: I0127 21:59:58.820439 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92f8599e-77ad-4227-af37-2e820b19d098-utilities" (OuterVolumeSpecName: "utilities") pod "92f8599e-77ad-4227-af37-2e820b19d098" (UID: "92f8599e-77ad-4227-af37-2e820b19d098"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:59:58 crc kubenswrapper[4793]: I0127 21:59:58.830725 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92f8599e-77ad-4227-af37-2e820b19d098-kube-api-access-qfj4m" (OuterVolumeSpecName: "kube-api-access-qfj4m") pod "92f8599e-77ad-4227-af37-2e820b19d098" (UID: "92f8599e-77ad-4227-af37-2e820b19d098"). InnerVolumeSpecName "kube-api-access-qfj4m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 21:59:58 crc kubenswrapper[4793]: I0127 21:59:58.871756 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92f8599e-77ad-4227-af37-2e820b19d098-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "92f8599e-77ad-4227-af37-2e820b19d098" (UID: "92f8599e-77ad-4227-af37-2e820b19d098"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 21:59:58 crc kubenswrapper[4793]: I0127 21:59:58.922033 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92f8599e-77ad-4227-af37-2e820b19d098-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 21:59:58 crc kubenswrapper[4793]: I0127 21:59:58.922061 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92f8599e-77ad-4227-af37-2e820b19d098-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 21:59:58 crc kubenswrapper[4793]: I0127 21:59:58.922073 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfj4m\" (UniqueName: \"kubernetes.io/projected/92f8599e-77ad-4227-af37-2e820b19d098-kube-api-access-qfj4m\") on node \"crc\" DevicePath \"\"" Jan 27 21:59:59 crc kubenswrapper[4793]: I0127 21:59:59.104674 4793 generic.go:334] "Generic (PLEG): container finished" podID="92f8599e-77ad-4227-af37-2e820b19d098" containerID="e6206ce7faa87d32b15e90fbc73ffcc6016d147737e9adf9133ead482f899d85" exitCode=0 Jan 27 21:59:59 crc kubenswrapper[4793]: I0127 21:59:59.104725 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mr2kv" event={"ID":"92f8599e-77ad-4227-af37-2e820b19d098","Type":"ContainerDied","Data":"e6206ce7faa87d32b15e90fbc73ffcc6016d147737e9adf9133ead482f899d85"} Jan 27 21:59:59 crc kubenswrapper[4793]: I0127 21:59:59.104754 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mr2kv" Jan 27 21:59:59 crc kubenswrapper[4793]: I0127 21:59:59.104769 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mr2kv" event={"ID":"92f8599e-77ad-4227-af37-2e820b19d098","Type":"ContainerDied","Data":"f03ac944125bdfdbe452d0dba78ec210f5d38c4aee0545097b9f4d7781600ae3"} Jan 27 21:59:59 crc kubenswrapper[4793]: I0127 21:59:59.104789 4793 scope.go:117] "RemoveContainer" containerID="e6206ce7faa87d32b15e90fbc73ffcc6016d147737e9adf9133ead482f899d85" Jan 27 21:59:59 crc kubenswrapper[4793]: I0127 21:59:59.139041 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mr2kv"] Jan 27 21:59:59 crc kubenswrapper[4793]: I0127 21:59:59.143416 4793 scope.go:117] "RemoveContainer" containerID="d90f1e1e200cf2e2ae34b8907117b5d4f383327079e9b28d27403e4efa6251a4" Jan 27 21:59:59 crc kubenswrapper[4793]: I0127 21:59:59.161576 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-mr2kv"] Jan 27 21:59:59 crc kubenswrapper[4793]: I0127 21:59:59.184625 4793 scope.go:117] "RemoveContainer" containerID="345822ad347977d6a56cdd12030c2d071a0782b04932faefc5d5ba11702450e5" Jan 27 21:59:59 crc kubenswrapper[4793]: I0127 21:59:59.216609 4793 scope.go:117] "RemoveContainer" containerID="e6206ce7faa87d32b15e90fbc73ffcc6016d147737e9adf9133ead482f899d85" Jan 27 21:59:59 crc kubenswrapper[4793]: E0127 21:59:59.217240 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6206ce7faa87d32b15e90fbc73ffcc6016d147737e9adf9133ead482f899d85\": container with ID starting with e6206ce7faa87d32b15e90fbc73ffcc6016d147737e9adf9133ead482f899d85 not found: ID does not exist" containerID="e6206ce7faa87d32b15e90fbc73ffcc6016d147737e9adf9133ead482f899d85" Jan 27 21:59:59 crc kubenswrapper[4793]: I0127 21:59:59.217288 
4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6206ce7faa87d32b15e90fbc73ffcc6016d147737e9adf9133ead482f899d85"} err="failed to get container status \"e6206ce7faa87d32b15e90fbc73ffcc6016d147737e9adf9133ead482f899d85\": rpc error: code = NotFound desc = could not find container \"e6206ce7faa87d32b15e90fbc73ffcc6016d147737e9adf9133ead482f899d85\": container with ID starting with e6206ce7faa87d32b15e90fbc73ffcc6016d147737e9adf9133ead482f899d85 not found: ID does not exist" Jan 27 21:59:59 crc kubenswrapper[4793]: I0127 21:59:59.217481 4793 scope.go:117] "RemoveContainer" containerID="d90f1e1e200cf2e2ae34b8907117b5d4f383327079e9b28d27403e4efa6251a4" Jan 27 21:59:59 crc kubenswrapper[4793]: E0127 21:59:59.218953 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d90f1e1e200cf2e2ae34b8907117b5d4f383327079e9b28d27403e4efa6251a4\": container with ID starting with d90f1e1e200cf2e2ae34b8907117b5d4f383327079e9b28d27403e4efa6251a4 not found: ID does not exist" containerID="d90f1e1e200cf2e2ae34b8907117b5d4f383327079e9b28d27403e4efa6251a4" Jan 27 21:59:59 crc kubenswrapper[4793]: I0127 21:59:59.219174 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d90f1e1e200cf2e2ae34b8907117b5d4f383327079e9b28d27403e4efa6251a4"} err="failed to get container status \"d90f1e1e200cf2e2ae34b8907117b5d4f383327079e9b28d27403e4efa6251a4\": rpc error: code = NotFound desc = could not find container \"d90f1e1e200cf2e2ae34b8907117b5d4f383327079e9b28d27403e4efa6251a4\": container with ID starting with d90f1e1e200cf2e2ae34b8907117b5d4f383327079e9b28d27403e4efa6251a4 not found: ID does not exist" Jan 27 21:59:59 crc kubenswrapper[4793]: I0127 21:59:59.219196 4793 scope.go:117] "RemoveContainer" containerID="345822ad347977d6a56cdd12030c2d071a0782b04932faefc5d5ba11702450e5" Jan 27 21:59:59 crc kubenswrapper[4793]: E0127 21:59:59.219645 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"345822ad347977d6a56cdd12030c2d071a0782b04932faefc5d5ba11702450e5\": container with ID starting with 345822ad347977d6a56cdd12030c2d071a0782b04932faefc5d5ba11702450e5 not found: ID does not exist" containerID="345822ad347977d6a56cdd12030c2d071a0782b04932faefc5d5ba11702450e5" Jan 27 21:59:59 crc kubenswrapper[4793]: I0127 21:59:59.219673 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"345822ad347977d6a56cdd12030c2d071a0782b04932faefc5d5ba11702450e5"} err="failed to get container status \"345822ad347977d6a56cdd12030c2d071a0782b04932faefc5d5ba11702450e5\": rpc error: code = NotFound desc = could not find container \"345822ad347977d6a56cdd12030c2d071a0782b04932faefc5d5ba11702450e5\": container with ID starting with 345822ad347977d6a56cdd12030c2d071a0782b04932faefc5d5ba11702450e5 not found: ID does not exist" Jan 27 21:59:59 crc kubenswrapper[4793]: I0127 21:59:59.854588 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92f8599e-77ad-4227-af37-2e820b19d098" path="/var/lib/kubelet/pods/92f8599e-77ad-4227-af37-2e820b19d098/volumes" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.199737 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl"] Jan 27 22:00:00 crc kubenswrapper[4793]: E0127 22:00:00.200538 4793 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="92f8599e-77ad-4227-af37-2e820b19d098" containerName="extract-utilities" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.200607 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="92f8599e-77ad-4227-af37-2e820b19d098" containerName="extract-utilities" Jan 27 22:00:00 crc kubenswrapper[4793]: E0127 22:00:00.200632 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92f8599e-77ad-4227-af37-2e820b19d098" containerName="registry-server" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.200651 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="92f8599e-77ad-4227-af37-2e820b19d098" containerName="registry-server" Jan 27 22:00:00 crc kubenswrapper[4793]: E0127 22:00:00.200722 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92f8599e-77ad-4227-af37-2e820b19d098" containerName="extract-content" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.200745 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="92f8599e-77ad-4227-af37-2e820b19d098" containerName="extract-content" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.201247 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="92f8599e-77ad-4227-af37-2e820b19d098" containerName="registry-server" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.202667 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.206745 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.206746 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.226470 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl"] Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.356268 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-config-volume\") pod \"collect-profiles-29492520-8h5tl\" (UID: \"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.356512 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2577x\" (UniqueName: \"kubernetes.io/projected/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-kube-api-access-2577x\") pod \"collect-profiles-29492520-8h5tl\" (UID: \"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.356679 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-secret-volume\") pod \"collect-profiles-29492520-8h5tl\" (UID: \"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.458477 4793 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-config-volume\") pod \"collect-profiles-29492520-8h5tl\" (UID: \"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.458649 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2577x\" (UniqueName: \"kubernetes.io/projected/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-kube-api-access-2577x\") pod \"collect-profiles-29492520-8h5tl\" (UID: \"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.458770 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-secret-volume\") pod \"collect-profiles-29492520-8h5tl\" (UID: \"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.459721 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-config-volume\") pod \"collect-profiles-29492520-8h5tl\" (UID: \"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.468533 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-secret-volume\") pod \"collect-profiles-29492520-8h5tl\" (UID: \"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.501119 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2577x\" (UniqueName: \"kubernetes.io/projected/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-kube-api-access-2577x\") pod \"collect-profiles-29492520-8h5tl\" (UID: \"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.536024 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" Jan 27 22:00:00 crc kubenswrapper[4793]: I0127 22:00:00.847886 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl"] Jan 27 22:00:01 crc kubenswrapper[4793]: I0127 22:00:01.151563 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" event={"ID":"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216","Type":"ContainerStarted","Data":"ec0aeae4e2cdb457d681a8e50b51d8817b148ab2c535b02ef1cf26db61baa880"} Jan 27 22:00:01 crc kubenswrapper[4793]: I0127 22:00:01.151935 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" event={"ID":"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216","Type":"ContainerStarted","Data":"e17e1e0d7e459d386aca04dfac0278a98e4ef057f71d06f69f0674cb543b66fa"} Jan 27 22:00:01 crc kubenswrapper[4793]: I0127 22:00:01.173423 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" podStartSLOduration=1.173400377 podStartE2EDuration="1.173400377s" podCreationTimestamp="2026-01-27 22:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 22:00:01.16937297 +0000 UTC m=+7026.559626136" watchObservedRunningTime="2026-01-27 22:00:01.173400377 +0000 UTC m=+7026.563653533" Jan 27 22:00:02 crc kubenswrapper[4793]: I0127 22:00:02.169237 4793 generic.go:334] "Generic (PLEG): container finished" podID="df8023ae-0b9e-4c7d-9c2f-4d1cfb697216" containerID="ec0aeae4e2cdb457d681a8e50b51d8817b148ab2c535b02ef1cf26db61baa880" exitCode=0 Jan 27 22:00:02 crc kubenswrapper[4793]: I0127 22:00:02.169313 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" event={"ID":"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216","Type":"ContainerDied","Data":"ec0aeae4e2cdb457d681a8e50b51d8817b148ab2c535b02ef1cf26db61baa880"} Jan 27 22:00:03 crc kubenswrapper[4793]: I0127 22:00:03.827235 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" Jan 27 22:00:03 crc kubenswrapper[4793]: I0127 22:00:03.934514 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2577x\" (UniqueName: \"kubernetes.io/projected/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-kube-api-access-2577x\") pod \"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216\" (UID: \"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216\") " Jan 27 22:00:03 crc kubenswrapper[4793]: I0127 22:00:03.934911 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-secret-volume\") pod \"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216\" (UID: \"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216\") " Jan 27 22:00:03 crc kubenswrapper[4793]: I0127 22:00:03.934962 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-config-volume\") pod \"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216\" (UID: \"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216\") " Jan 27 22:00:03 crc kubenswrapper[4793]: I0127 22:00:03.935574 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-config-volume" (OuterVolumeSpecName: "config-volume") pod "df8023ae-0b9e-4c7d-9c2f-4d1cfb697216" (UID: "df8023ae-0b9e-4c7d-9c2f-4d1cfb697216"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 22:00:03 crc kubenswrapper[4793]: I0127 22:00:03.940714 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-kube-api-access-2577x" (OuterVolumeSpecName: "kube-api-access-2577x") pod "df8023ae-0b9e-4c7d-9c2f-4d1cfb697216" (UID: "df8023ae-0b9e-4c7d-9c2f-4d1cfb697216"). InnerVolumeSpecName "kube-api-access-2577x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:00:03 crc kubenswrapper[4793]: I0127 22:00:03.953388 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "df8023ae-0b9e-4c7d-9c2f-4d1cfb697216" (UID: "df8023ae-0b9e-4c7d-9c2f-4d1cfb697216"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 22:00:04 crc kubenswrapper[4793]: I0127 22:00:04.037271 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2577x\" (UniqueName: \"kubernetes.io/projected/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-kube-api-access-2577x\") on node \"crc\" DevicePath \"\"" Jan 27 22:00:04 crc kubenswrapper[4793]: I0127 22:00:04.037346 4793 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 22:00:04 crc kubenswrapper[4793]: I0127 22:00:04.037380 4793 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 22:00:04 crc kubenswrapper[4793]: I0127 22:00:04.196036 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" event={"ID":"df8023ae-0b9e-4c7d-9c2f-4d1cfb697216","Type":"ContainerDied","Data":"e17e1e0d7e459d386aca04dfac0278a98e4ef057f71d06f69f0674cb543b66fa"} Jan 27 22:00:04 crc kubenswrapper[4793]: I0127 22:00:04.196078 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e17e1e0d7e459d386aca04dfac0278a98e4ef057f71d06f69f0674cb543b66fa" Jan 27 22:00:04 crc kubenswrapper[4793]: I0127 22:00:04.196128 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl" Jan 27 22:00:04 crc kubenswrapper[4793]: I0127 22:00:04.301446 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd"] Jan 27 22:00:04 crc kubenswrapper[4793]: I0127 22:00:04.315455 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492475-vftgd"] Jan 27 22:00:05 crc kubenswrapper[4793]: I0127 22:00:05.818101 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b61115a-e157-4d07-bcfe-2440b5a4ff2c" path="/var/lib/kubelet/pods/2b61115a-e157-4d07-bcfe-2440b5a4ff2c/volumes" Jan 27 22:00:09 crc kubenswrapper[4793]: I0127 22:00:09.803456 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7" Jan 27 22:00:09 crc kubenswrapper[4793]: E0127 22:00:09.804480 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:00:20 crc kubenswrapper[4793]: I0127 22:00:20.802963 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7" Jan 27 22:00:21 crc kubenswrapper[4793]: I0127 22:00:21.428364 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd"} Jan 27 22:00:23 crc kubenswrapper[4793]: I0127 22:00:23.243224 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 22:00:24 crc 
kubenswrapper[4793]: I0127 22:00:24.463630 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" exitCode=1 Jan 27 22:00:24 crc kubenswrapper[4793]: I0127 22:00:24.463712 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd"} Jan 27 22:00:24 crc kubenswrapper[4793]: I0127 22:00:24.464003 4793 scope.go:117] "RemoveContainer" containerID="f154a87687f40bfcf0cfad01ad9952f205169907e86761b97b971c4fa1594fe7" Jan 27 22:00:24 crc kubenswrapper[4793]: I0127 22:00:24.464426 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:00:24 crc kubenswrapper[4793]: E0127 22:00:24.464696 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:00:28 crc kubenswrapper[4793]: I0127 22:00:28.242427 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:00:28 crc kubenswrapper[4793]: I0127 22:00:28.243059 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:00:28 crc kubenswrapper[4793]: I0127 22:00:28.243076 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:00:28 crc kubenswrapper[4793]: I0127 22:00:28.244400 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:00:28 crc kubenswrapper[4793]: E0127 22:00:28.245011 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:00:38 crc kubenswrapper[4793]: I0127 22:00:38.280998 4793 scope.go:117] "RemoveContainer" containerID="8efd72baeacbc9cbc3af34ef491da75a938404e78e7bc0db78259f0b48bc16a7" Jan 27 22:00:39 crc kubenswrapper[4793]: I0127 22:00:39.804132 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:00:39 crc kubenswrapper[4793]: E0127 22:00:39.804788 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:00:51 crc kubenswrapper[4793]: I0127 22:00:51.804312 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:00:51 crc kubenswrapper[4793]: E0127 22:00:51.805687 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.194608 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29492521-tfmps"] Jan 27 22:01:00 crc kubenswrapper[4793]: E0127 22:01:00.196169 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df8023ae-0b9e-4c7d-9c2f-4d1cfb697216" containerName="collect-profiles" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.196205 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="df8023ae-0b9e-4c7d-9c2f-4d1cfb697216" containerName="collect-profiles" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.196629 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="df8023ae-0b9e-4c7d-9c2f-4d1cfb697216" containerName="collect-profiles" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.197993 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29492521-tfmps" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.212979 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29492521-tfmps"] Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.280520 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-config-data\") pod \"keystone-cron-29492521-tfmps\" (UID: \"53e5f200-1d60-40e6-94e1-dc9d928c2785\") " pod="openstack/keystone-cron-29492521-tfmps" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.280974 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-combined-ca-bundle\") pod \"keystone-cron-29492521-tfmps\" (UID: \"53e5f200-1d60-40e6-94e1-dc9d928c2785\") " pod="openstack/keystone-cron-29492521-tfmps" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.281245 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-fernet-keys\") pod \"keystone-cron-29492521-tfmps\" (UID: \"53e5f200-1d60-40e6-94e1-dc9d928c2785\") " pod="openstack/keystone-cron-29492521-tfmps" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.281290 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpllv\" (UniqueName: \"kubernetes.io/projected/53e5f200-1d60-40e6-94e1-dc9d928c2785-kube-api-access-jpllv\") pod \"keystone-cron-29492521-tfmps\" (UID: \"53e5f200-1d60-40e6-94e1-dc9d928c2785\") " pod="openstack/keystone-cron-29492521-tfmps" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.383942 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-config-data\") pod \"keystone-cron-29492521-tfmps\" (UID: \"53e5f200-1d60-40e6-94e1-dc9d928c2785\") " pod="openstack/keystone-cron-29492521-tfmps" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.384039 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-combined-ca-bundle\") pod \"keystone-cron-29492521-tfmps\" (UID: \"53e5f200-1d60-40e6-94e1-dc9d928c2785\") " pod="openstack/keystone-cron-29492521-tfmps" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.384327 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-fernet-keys\") pod \"keystone-cron-29492521-tfmps\" (UID: \"53e5f200-1d60-40e6-94e1-dc9d928c2785\") " pod="openstack/keystone-cron-29492521-tfmps" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.384390 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpllv\" (UniqueName: \"kubernetes.io/projected/53e5f200-1d60-40e6-94e1-dc9d928c2785-kube-api-access-jpllv\") pod \"keystone-cron-29492521-tfmps\" (UID: \"53e5f200-1d60-40e6-94e1-dc9d928c2785\") " pod="openstack/keystone-cron-29492521-tfmps" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.391706 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-config-data\") pod \"keystone-cron-29492521-tfmps\" (UID: \"53e5f200-1d60-40e6-94e1-dc9d928c2785\") " pod="openstack/keystone-cron-29492521-tfmps" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.394616 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-fernet-keys\") pod \"keystone-cron-29492521-tfmps\" (UID: \"53e5f200-1d60-40e6-94e1-dc9d928c2785\") " pod="openstack/keystone-cron-29492521-tfmps" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.406532 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-combined-ca-bundle\") pod \"keystone-cron-29492521-tfmps\" (UID: \"53e5f200-1d60-40e6-94e1-dc9d928c2785\") " pod="openstack/keystone-cron-29492521-tfmps" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.418991 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpllv\" (UniqueName: \"kubernetes.io/projected/53e5f200-1d60-40e6-94e1-dc9d928c2785-kube-api-access-jpllv\") pod \"keystone-cron-29492521-tfmps\" (UID: \"53e5f200-1d60-40e6-94e1-dc9d928c2785\") " pod="openstack/keystone-cron-29492521-tfmps" Jan 27 22:01:00 crc kubenswrapper[4793]: I0127 22:01:00.537318 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29492521-tfmps" Jan 27 22:01:01 crc kubenswrapper[4793]: I0127 22:01:01.126779 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29492521-tfmps"] Jan 27 22:01:01 crc kubenswrapper[4793]: I0127 22:01:01.950060 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29492521-tfmps" event={"ID":"53e5f200-1d60-40e6-94e1-dc9d928c2785","Type":"ContainerStarted","Data":"9e6d8d2ff0460e0500d3e2eb8b7e60dd0c596c6f4a5f38b4c24548ff92972540"} Jan 27 22:01:01 crc kubenswrapper[4793]: I0127 22:01:01.950417 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29492521-tfmps" event={"ID":"53e5f200-1d60-40e6-94e1-dc9d928c2785","Type":"ContainerStarted","Data":"7c15aecd8a3304bb72fd771c9dfedd94b9c7133de7f7977d4b5123e5b9bf3a82"} Jan 27 22:01:01 crc kubenswrapper[4793]: I0127 22:01:01.977530 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29492521-tfmps" podStartSLOduration=1.977504658 podStartE2EDuration="1.977504658s" podCreationTimestamp="2026-01-27 22:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 22:01:01.965940578 +0000 UTC m=+7087.356193744" watchObservedRunningTime="2026-01-27 22:01:01.977504658 +0000 UTC m=+7087.367757824" Jan 27 22:01:04 crc kubenswrapper[4793]: I0127 22:01:04.985453 4793 generic.go:334] "Generic (PLEG): container finished" podID="53e5f200-1d60-40e6-94e1-dc9d928c2785" containerID="9e6d8d2ff0460e0500d3e2eb8b7e60dd0c596c6f4a5f38b4c24548ff92972540" exitCode=0 Jan 27 22:01:04 crc kubenswrapper[4793]: I0127 22:01:04.985607 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29492521-tfmps" event={"ID":"53e5f200-1d60-40e6-94e1-dc9d928c2785","Type":"ContainerDied","Data":"9e6d8d2ff0460e0500d3e2eb8b7e60dd0c596c6f4a5f38b4c24548ff92972540"} Jan 27 22:01:06 crc kubenswrapper[4793]: I0127 22:01:06.509579 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29492521-tfmps" Jan 27 22:01:06 crc kubenswrapper[4793]: I0127 22:01:06.647754 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-fernet-keys\") pod \"53e5f200-1d60-40e6-94e1-dc9d928c2785\" (UID: \"53e5f200-1d60-40e6-94e1-dc9d928c2785\") " Jan 27 22:01:06 crc kubenswrapper[4793]: I0127 22:01:06.647902 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-combined-ca-bundle\") pod \"53e5f200-1d60-40e6-94e1-dc9d928c2785\" (UID: \"53e5f200-1d60-40e6-94e1-dc9d928c2785\") " Jan 27 22:01:06 crc kubenswrapper[4793]: I0127 22:01:06.648180 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jpllv\" (UniqueName: \"kubernetes.io/projected/53e5f200-1d60-40e6-94e1-dc9d928c2785-kube-api-access-jpllv\") pod \"53e5f200-1d60-40e6-94e1-dc9d928c2785\" (UID: \"53e5f200-1d60-40e6-94e1-dc9d928c2785\") " Jan 27 22:01:06 crc kubenswrapper[4793]: I0127 22:01:06.648306 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-config-data\") pod \"53e5f200-1d60-40e6-94e1-dc9d928c2785\" (UID: \"53e5f200-1d60-40e6-94e1-dc9d928c2785\") " Jan 27 22:01:06 crc kubenswrapper[4793]: I0127 22:01:06.662177 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53e5f200-1d60-40e6-94e1-dc9d928c2785-kube-api-access-jpllv" (OuterVolumeSpecName: "kube-api-access-jpllv") pod "53e5f200-1d60-40e6-94e1-dc9d928c2785" (UID: "53e5f200-1d60-40e6-94e1-dc9d928c2785"). InnerVolumeSpecName "kube-api-access-jpllv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:01:06 crc kubenswrapper[4793]: I0127 22:01:06.663412 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "53e5f200-1d60-40e6-94e1-dc9d928c2785" (UID: "53e5f200-1d60-40e6-94e1-dc9d928c2785"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 22:01:06 crc kubenswrapper[4793]: I0127 22:01:06.690862 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "53e5f200-1d60-40e6-94e1-dc9d928c2785" (UID: "53e5f200-1d60-40e6-94e1-dc9d928c2785"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 22:01:06 crc kubenswrapper[4793]: I0127 22:01:06.720362 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-config-data" (OuterVolumeSpecName: "config-data") pod "53e5f200-1d60-40e6-94e1-dc9d928c2785" (UID: "53e5f200-1d60-40e6-94e1-dc9d928c2785"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 22:01:06 crc kubenswrapper[4793]: I0127 22:01:06.750657 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 22:01:06 crc kubenswrapper[4793]: I0127 22:01:06.750689 4793 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 27 22:01:06 crc kubenswrapper[4793]: I0127 22:01:06.750698 4793 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53e5f200-1d60-40e6-94e1-dc9d928c2785-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 22:01:06 crc kubenswrapper[4793]: I0127 22:01:06.750722 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jpllv\" (UniqueName: \"kubernetes.io/projected/53e5f200-1d60-40e6-94e1-dc9d928c2785-kube-api-access-jpllv\") on node \"crc\" DevicePath \"\"" Jan 27 22:01:06 crc kubenswrapper[4793]: I0127 22:01:06.804147 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:01:06 crc kubenswrapper[4793]: E0127 22:01:06.804398 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:01:07 crc kubenswrapper[4793]: I0127 22:01:07.012342 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29492521-tfmps" event={"ID":"53e5f200-1d60-40e6-94e1-dc9d928c2785","Type":"ContainerDied","Data":"7c15aecd8a3304bb72fd771c9dfedd94b9c7133de7f7977d4b5123e5b9bf3a82"} Jan 27 22:01:07 crc kubenswrapper[4793]: I0127 22:01:07.012408 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c15aecd8a3304bb72fd771c9dfedd94b9c7133de7f7977d4b5123e5b9bf3a82" Jan 27 22:01:07 crc kubenswrapper[4793]: I0127 22:01:07.012497 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29492521-tfmps" Jan 27 22:01:21 crc kubenswrapper[4793]: I0127 22:01:21.804363 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:01:21 crc kubenswrapper[4793]: E0127 22:01:21.805640 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:01:35 crc kubenswrapper[4793]: I0127 22:01:35.813692 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:01:35 crc kubenswrapper[4793]: E0127 22:01:35.814482 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:01:46 crc kubenswrapper[4793]: I0127 22:01:46.804631 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:01:46 crc kubenswrapper[4793]: E0127 22:01:46.805906 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:01:59 crc kubenswrapper[4793]: I0127 22:01:59.803725 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:01:59 crc kubenswrapper[4793]: E0127 22:01:59.804408 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:02:00 crc kubenswrapper[4793]: I0127 22:02:00.246445 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lmf78"] Jan 27 22:02:00 crc kubenswrapper[4793]: E0127 22:02:00.247515 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53e5f200-1d60-40e6-94e1-dc9d928c2785" containerName="keystone-cron" Jan 27 22:02:00 crc kubenswrapper[4793]: I0127 22:02:00.247533 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="53e5f200-1d60-40e6-94e1-dc9d928c2785" containerName="keystone-cron" Jan 27 22:02:00 crc kubenswrapper[4793]: I0127 22:02:00.248244 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="53e5f200-1d60-40e6-94e1-dc9d928c2785" containerName="keystone-cron" Jan 27 22:02:00 crc kubenswrapper[4793]: I0127 22:02:00.249844 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:00 crc kubenswrapper[4793]: I0127 22:02:00.267403 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lmf78"] Jan 27 22:02:00 crc kubenswrapper[4793]: I0127 22:02:00.391810 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j29cq\" (UniqueName: \"kubernetes.io/projected/e629e924-b6dc-4c38-8986-31d838c4fc69-kube-api-access-j29cq\") pod \"redhat-marketplace-lmf78\" (UID: \"e629e924-b6dc-4c38-8986-31d838c4fc69\") " pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:00 crc kubenswrapper[4793]: I0127 22:02:00.392139 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e629e924-b6dc-4c38-8986-31d838c4fc69-utilities\") pod \"redhat-marketplace-lmf78\" (UID: \"e629e924-b6dc-4c38-8986-31d838c4fc69\") " pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:00 crc kubenswrapper[4793]: I0127 22:02:00.392226 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e629e924-b6dc-4c38-8986-31d838c4fc69-catalog-content\") pod \"redhat-marketplace-lmf78\" (UID: \"e629e924-b6dc-4c38-8986-31d838c4fc69\") " pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:00 crc kubenswrapper[4793]: I0127 22:02:00.493758 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j29cq\" (UniqueName: \"kubernetes.io/projected/e629e924-b6dc-4c38-8986-31d838c4fc69-kube-api-access-j29cq\") pod \"redhat-marketplace-lmf78\" (UID: \"e629e924-b6dc-4c38-8986-31d838c4fc69\") " pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:00 crc kubenswrapper[4793]: I0127 22:02:00.494181 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e629e924-b6dc-4c38-8986-31d838c4fc69-utilities\") pod \"redhat-marketplace-lmf78\" (UID: \"e629e924-b6dc-4c38-8986-31d838c4fc69\") " pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:00 crc kubenswrapper[4793]: I0127 22:02:00.494674 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e629e924-b6dc-4c38-8986-31d838c4fc69-utilities\") pod \"redhat-marketplace-lmf78\" (UID: \"e629e924-b6dc-4c38-8986-31d838c4fc69\") " pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:00 crc kubenswrapper[4793]: I0127 22:02:00.494805 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e629e924-b6dc-4c38-8986-31d838c4fc69-catalog-content\") pod \"redhat-marketplace-lmf78\" (UID: \"e629e924-b6dc-4c38-8986-31d838c4fc69\") " pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:00 crc kubenswrapper[4793]: I0127 22:02:00.495078 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e629e924-b6dc-4c38-8986-31d838c4fc69-catalog-content\") pod \"redhat-marketplace-lmf78\" (UID: \"e629e924-b6dc-4c38-8986-31d838c4fc69\") " pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:00 crc kubenswrapper[4793]: I0127 22:02:00.512821 4793 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-j29cq\" (UniqueName: \"kubernetes.io/projected/e629e924-b6dc-4c38-8986-31d838c4fc69-kube-api-access-j29cq\") pod \"redhat-marketplace-lmf78\" (UID: \"e629e924-b6dc-4c38-8986-31d838c4fc69\") " pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:00 crc kubenswrapper[4793]: I0127 22:02:00.590193 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:01 crc kubenswrapper[4793]: I0127 22:02:01.094774 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lmf78"] Jan 27 22:02:01 crc kubenswrapper[4793]: W0127 22:02:01.107249 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode629e924_b6dc_4c38_8986_31d838c4fc69.slice/crio-bd14bf04e4b05920d62d147d7a840ac06950f5d19f02b46527b592a079738852 WatchSource:0}: Error finding container bd14bf04e4b05920d62d147d7a840ac06950f5d19f02b46527b592a079738852: Status 404 returned error can't find the container with id bd14bf04e4b05920d62d147d7a840ac06950f5d19f02b46527b592a079738852 Jan 27 22:02:01 crc kubenswrapper[4793]: I0127 22:02:01.707530 4793 generic.go:334] "Generic (PLEG): container finished" podID="e629e924-b6dc-4c38-8986-31d838c4fc69" containerID="a676a9636cc55f664f1d33da2430fd81510d414129797beae663e2c29b8a4221" exitCode=0 Jan 27 22:02:01 crc kubenswrapper[4793]: I0127 22:02:01.707684 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lmf78" event={"ID":"e629e924-b6dc-4c38-8986-31d838c4fc69","Type":"ContainerDied","Data":"a676a9636cc55f664f1d33da2430fd81510d414129797beae663e2c29b8a4221"} Jan 27 22:02:01 crc kubenswrapper[4793]: I0127 22:02:01.708732 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lmf78" event={"ID":"e629e924-b6dc-4c38-8986-31d838c4fc69","Type":"ContainerStarted","Data":"bd14bf04e4b05920d62d147d7a840ac06950f5d19f02b46527b592a079738852"} Jan 27 22:02:04 crc kubenswrapper[4793]: I0127 22:02:04.749653 4793 generic.go:334] "Generic (PLEG): container finished" podID="e629e924-b6dc-4c38-8986-31d838c4fc69" containerID="1385c0a22d938da2eafff3a647b2c8fc2cbebed7aa47ed150aa5461550d8699c" exitCode=0 Jan 27 22:02:04 crc kubenswrapper[4793]: I0127 22:02:04.749743 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lmf78" event={"ID":"e629e924-b6dc-4c38-8986-31d838c4fc69","Type":"ContainerDied","Data":"1385c0a22d938da2eafff3a647b2c8fc2cbebed7aa47ed150aa5461550d8699c"} Jan 27 22:02:05 crc kubenswrapper[4793]: I0127 22:02:05.766722 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lmf78" event={"ID":"e629e924-b6dc-4c38-8986-31d838c4fc69","Type":"ContainerStarted","Data":"6a0009b6f7422ebab80ad62b701f14ea7030a349a8237d9df026ea23939e3894"} Jan 27 22:02:05 crc kubenswrapper[4793]: I0127 22:02:05.797921 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lmf78" podStartSLOduration=2.306096036 podStartE2EDuration="5.797898797s" podCreationTimestamp="2026-01-27 22:02:00 +0000 UTC" firstStartedPulling="2026-01-27 22:02:01.710870362 +0000 UTC m=+7147.101123548" lastFinishedPulling="2026-01-27 22:02:05.202673143 +0000 UTC m=+7150.592926309" observedRunningTime="2026-01-27 22:02:05.795629271 +0000 UTC m=+7151.185882427" 
watchObservedRunningTime="2026-01-27 22:02:05.797898797 +0000 UTC m=+7151.188151953" Jan 27 22:02:10 crc kubenswrapper[4793]: I0127 22:02:10.591634 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:10 crc kubenswrapper[4793]: I0127 22:02:10.592036 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:10 crc kubenswrapper[4793]: I0127 22:02:10.680841 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:10 crc kubenswrapper[4793]: I0127 22:02:10.921194 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:12 crc kubenswrapper[4793]: I0127 22:02:12.804656 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:02:12 crc kubenswrapper[4793]: E0127 22:02:12.805428 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.031147 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lmf78"] Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.031708 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lmf78" podUID="e629e924-b6dc-4c38-8986-31d838c4fc69" containerName="registry-server" containerID="cri-o://6a0009b6f7422ebab80ad62b701f14ea7030a349a8237d9df026ea23939e3894" gracePeriod=2 Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.550160 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.570697 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j29cq\" (UniqueName: \"kubernetes.io/projected/e629e924-b6dc-4c38-8986-31d838c4fc69-kube-api-access-j29cq\") pod \"e629e924-b6dc-4c38-8986-31d838c4fc69\" (UID: \"e629e924-b6dc-4c38-8986-31d838c4fc69\") " Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.570775 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e629e924-b6dc-4c38-8986-31d838c4fc69-catalog-content\") pod \"e629e924-b6dc-4c38-8986-31d838c4fc69\" (UID: \"e629e924-b6dc-4c38-8986-31d838c4fc69\") " Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.571019 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e629e924-b6dc-4c38-8986-31d838c4fc69-utilities\") pod \"e629e924-b6dc-4c38-8986-31d838c4fc69\" (UID: \"e629e924-b6dc-4c38-8986-31d838c4fc69\") " Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.572151 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e629e924-b6dc-4c38-8986-31d838c4fc69-utilities" (OuterVolumeSpecName: "utilities") pod "e629e924-b6dc-4c38-8986-31d838c4fc69" (UID: "e629e924-b6dc-4c38-8986-31d838c4fc69"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.578883 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e629e924-b6dc-4c38-8986-31d838c4fc69-kube-api-access-j29cq" (OuterVolumeSpecName: "kube-api-access-j29cq") pod "e629e924-b6dc-4c38-8986-31d838c4fc69" (UID: "e629e924-b6dc-4c38-8986-31d838c4fc69"). InnerVolumeSpecName "kube-api-access-j29cq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.643397 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e629e924-b6dc-4c38-8986-31d838c4fc69-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e629e924-b6dc-4c38-8986-31d838c4fc69" (UID: "e629e924-b6dc-4c38-8986-31d838c4fc69"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.672678 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e629e924-b6dc-4c38-8986-31d838c4fc69-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.672710 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j29cq\" (UniqueName: \"kubernetes.io/projected/e629e924-b6dc-4c38-8986-31d838c4fc69-kube-api-access-j29cq\") on node \"crc\" DevicePath \"\"" Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.672743 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e629e924-b6dc-4c38-8986-31d838c4fc69-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.927229 4793 generic.go:334] "Generic (PLEG): container finished" podID="e629e924-b6dc-4c38-8986-31d838c4fc69" containerID="6a0009b6f7422ebab80ad62b701f14ea7030a349a8237d9df026ea23939e3894" exitCode=0 Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.927301 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lmf78" event={"ID":"e629e924-b6dc-4c38-8986-31d838c4fc69","Type":"ContainerDied","Data":"6a0009b6f7422ebab80ad62b701f14ea7030a349a8237d9df026ea23939e3894"} Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.927331 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lmf78" Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.927359 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lmf78" event={"ID":"e629e924-b6dc-4c38-8986-31d838c4fc69","Type":"ContainerDied","Data":"bd14bf04e4b05920d62d147d7a840ac06950f5d19f02b46527b592a079738852"} Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.927390 4793 scope.go:117] "RemoveContainer" containerID="6a0009b6f7422ebab80ad62b701f14ea7030a349a8237d9df026ea23939e3894" Jan 27 22:02:14 crc kubenswrapper[4793]: I0127 22:02:14.957315 4793 scope.go:117] "RemoveContainer" containerID="1385c0a22d938da2eafff3a647b2c8fc2cbebed7aa47ed150aa5461550d8699c" Jan 27 22:02:15 crc kubenswrapper[4793]: I0127 22:02:15.002917 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lmf78"] Jan 27 22:02:15 crc kubenswrapper[4793]: I0127 22:02:15.005906 4793 scope.go:117] "RemoveContainer" containerID="a676a9636cc55f664f1d33da2430fd81510d414129797beae663e2c29b8a4221" Jan 27 22:02:15 crc kubenswrapper[4793]: I0127 22:02:15.014574 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lmf78"] Jan 27 22:02:15 crc kubenswrapper[4793]: I0127 22:02:15.041649 4793 scope.go:117] "RemoveContainer" containerID="6a0009b6f7422ebab80ad62b701f14ea7030a349a8237d9df026ea23939e3894" Jan 27 22:02:15 crc kubenswrapper[4793]: E0127 22:02:15.042186 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a0009b6f7422ebab80ad62b701f14ea7030a349a8237d9df026ea23939e3894\": container with ID starting with 6a0009b6f7422ebab80ad62b701f14ea7030a349a8237d9df026ea23939e3894 not found: ID does not exist" containerID="6a0009b6f7422ebab80ad62b701f14ea7030a349a8237d9df026ea23939e3894" Jan 27 22:02:15 crc kubenswrapper[4793]: I0127 22:02:15.042223 4793 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a0009b6f7422ebab80ad62b701f14ea7030a349a8237d9df026ea23939e3894"} err="failed to get container status \"6a0009b6f7422ebab80ad62b701f14ea7030a349a8237d9df026ea23939e3894\": rpc error: code = NotFound desc = could not find container \"6a0009b6f7422ebab80ad62b701f14ea7030a349a8237d9df026ea23939e3894\": container with ID starting with 6a0009b6f7422ebab80ad62b701f14ea7030a349a8237d9df026ea23939e3894 not found: ID does not exist" Jan 27 22:02:15 crc kubenswrapper[4793]: I0127 22:02:15.042250 4793 scope.go:117] "RemoveContainer" containerID="1385c0a22d938da2eafff3a647b2c8fc2cbebed7aa47ed150aa5461550d8699c" Jan 27 22:02:15 crc kubenswrapper[4793]: E0127 22:02:15.043153 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1385c0a22d938da2eafff3a647b2c8fc2cbebed7aa47ed150aa5461550d8699c\": container with ID starting with 1385c0a22d938da2eafff3a647b2c8fc2cbebed7aa47ed150aa5461550d8699c not found: ID does not exist" containerID="1385c0a22d938da2eafff3a647b2c8fc2cbebed7aa47ed150aa5461550d8699c" Jan 27 22:02:15 crc kubenswrapper[4793]: I0127 22:02:15.043198 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1385c0a22d938da2eafff3a647b2c8fc2cbebed7aa47ed150aa5461550d8699c"} err="failed to get container status \"1385c0a22d938da2eafff3a647b2c8fc2cbebed7aa47ed150aa5461550d8699c\": rpc error: code = NotFound desc = could not find container \"1385c0a22d938da2eafff3a647b2c8fc2cbebed7aa47ed150aa5461550d8699c\": container with ID starting with 1385c0a22d938da2eafff3a647b2c8fc2cbebed7aa47ed150aa5461550d8699c not found: ID does not exist" Jan 27 22:02:15 crc kubenswrapper[4793]: I0127 22:02:15.043226 4793 scope.go:117] "RemoveContainer" containerID="a676a9636cc55f664f1d33da2430fd81510d414129797beae663e2c29b8a4221" Jan 27 22:02:15 crc kubenswrapper[4793]: E0127 22:02:15.043566 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a676a9636cc55f664f1d33da2430fd81510d414129797beae663e2c29b8a4221\": container with ID starting with a676a9636cc55f664f1d33da2430fd81510d414129797beae663e2c29b8a4221 not found: ID does not exist" containerID="a676a9636cc55f664f1d33da2430fd81510d414129797beae663e2c29b8a4221" Jan 27 22:02:15 crc kubenswrapper[4793]: I0127 22:02:15.043598 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a676a9636cc55f664f1d33da2430fd81510d414129797beae663e2c29b8a4221"} err="failed to get container status \"a676a9636cc55f664f1d33da2430fd81510d414129797beae663e2c29b8a4221\": rpc error: code = NotFound desc = could not find container \"a676a9636cc55f664f1d33da2430fd81510d414129797beae663e2c29b8a4221\": container with ID starting with a676a9636cc55f664f1d33da2430fd81510d414129797beae663e2c29b8a4221 not found: ID does not exist" Jan 27 22:02:15 crc kubenswrapper[4793]: I0127 22:02:15.822858 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e629e924-b6dc-4c38-8986-31d838c4fc69" path="/var/lib/kubelet/pods/e629e924-b6dc-4c38-8986-31d838c4fc69/volumes" Jan 27 22:02:22 crc kubenswrapper[4793]: I0127 22:02:22.753466 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:02:22 crc kubenswrapper[4793]: I0127 22:02:22.754222 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:02:26 crc kubenswrapper[4793]: I0127 22:02:26.802901 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:02:26 crc kubenswrapper[4793]: E0127 22:02:26.803610 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:02:40 crc kubenswrapper[4793]: I0127 22:02:40.803608 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:02:40 crc kubenswrapper[4793]: E0127 22:02:40.804501 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:02:52 crc kubenswrapper[4793]: I0127 22:02:52.753424 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:02:52 crc kubenswrapper[4793]: I0127 22:02:52.754101 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:02:55 crc kubenswrapper[4793]: I0127 22:02:55.817621 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:02:55 crc kubenswrapper[4793]: E0127 22:02:55.818650 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:03:09 crc kubenswrapper[4793]: I0127 22:03:09.803893 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:03:09 crc kubenswrapper[4793]: E0127 22:03:09.804762 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" 
podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:03:22 crc kubenswrapper[4793]: I0127 22:03:22.754052 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:03:22 crc kubenswrapper[4793]: I0127 22:03:22.754918 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:03:22 crc kubenswrapper[4793]: I0127 22:03:22.755200 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 22:03:22 crc kubenswrapper[4793]: I0127 22:03:22.757397 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"92e6abc5aed66b383ba3361f29dbe5a66e4e4938ead003c7db11bf8cd34cc5a4"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 22:03:22 crc kubenswrapper[4793]: I0127 22:03:22.757684 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://92e6abc5aed66b383ba3361f29dbe5a66e4e4938ead003c7db11bf8cd34cc5a4" gracePeriod=600 Jan 27 22:03:23 crc kubenswrapper[4793]: I0127 22:03:23.728458 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="92e6abc5aed66b383ba3361f29dbe5a66e4e4938ead003c7db11bf8cd34cc5a4" exitCode=0 Jan 27 22:03:23 crc kubenswrapper[4793]: I0127 22:03:23.728570 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"92e6abc5aed66b383ba3361f29dbe5a66e4e4938ead003c7db11bf8cd34cc5a4"} Jan 27 22:03:23 crc kubenswrapper[4793]: I0127 22:03:23.729233 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"} Jan 27 22:03:23 crc kubenswrapper[4793]: I0127 22:03:23.729266 4793 scope.go:117] "RemoveContainer" containerID="92fabb919aaab55cdf56a714fc73577569b6dfc8dc1d386cb3ee898c65a62537" Jan 27 22:03:24 crc kubenswrapper[4793]: I0127 22:03:24.805046 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:03:24 crc kubenswrapper[4793]: E0127 22:03:24.805806 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:03:38 crc 
kubenswrapper[4793]: I0127 22:03:38.803627 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:03:38 crc kubenswrapper[4793]: E0127 22:03:38.804383 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:03:53 crc kubenswrapper[4793]: I0127 22:03:53.803839 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:03:53 crc kubenswrapper[4793]: E0127 22:03:53.805196 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:04:04 crc kubenswrapper[4793]: I0127 22:04:04.804455 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:04:04 crc kubenswrapper[4793]: E0127 22:04:04.805571 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:04:19 crc kubenswrapper[4793]: I0127 22:04:19.810116 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:04:19 crc kubenswrapper[4793]: E0127 22:04:19.816471 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:04:33 crc kubenswrapper[4793]: I0127 22:04:33.804336 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:04:33 crc kubenswrapper[4793]: E0127 22:04:33.805175 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:04:48 crc kubenswrapper[4793]: I0127 22:04:48.804133 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:04:48 crc kubenswrapper[4793]: E0127 22:04:48.805342 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:04:50 crc 
kubenswrapper[4793]: I0127 22:04:50.779816 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lmkbd"] Jan 27 22:04:50 crc kubenswrapper[4793]: E0127 22:04:50.781231 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e629e924-b6dc-4c38-8986-31d838c4fc69" containerName="extract-content" Jan 27 22:04:50 crc kubenswrapper[4793]: I0127 22:04:50.781276 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="e629e924-b6dc-4c38-8986-31d838c4fc69" containerName="extract-content" Jan 27 22:04:50 crc kubenswrapper[4793]: E0127 22:04:50.781352 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e629e924-b6dc-4c38-8986-31d838c4fc69" containerName="extract-utilities" Jan 27 22:04:50 crc kubenswrapper[4793]: I0127 22:04:50.781370 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="e629e924-b6dc-4c38-8986-31d838c4fc69" containerName="extract-utilities" Jan 27 22:04:50 crc kubenswrapper[4793]: E0127 22:04:50.781443 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e629e924-b6dc-4c38-8986-31d838c4fc69" containerName="registry-server" Jan 27 22:04:50 crc kubenswrapper[4793]: I0127 22:04:50.781461 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="e629e924-b6dc-4c38-8986-31d838c4fc69" containerName="registry-server" Jan 27 22:04:50 crc kubenswrapper[4793]: I0127 22:04:50.781980 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="e629e924-b6dc-4c38-8986-31d838c4fc69" containerName="registry-server" Jan 27 22:04:50 crc kubenswrapper[4793]: I0127 22:04:50.785816 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:04:50 crc kubenswrapper[4793]: I0127 22:04:50.802701 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lmkbd"] Jan 27 22:04:50 crc kubenswrapper[4793]: I0127 22:04:50.930380 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-catalog-content\") pod \"community-operators-lmkbd\" (UID: \"41e3db2d-ae88-4e4f-b273-e9e37ea9770b\") " pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:04:50 crc kubenswrapper[4793]: I0127 22:04:50.930696 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-utilities\") pod \"community-operators-lmkbd\" (UID: \"41e3db2d-ae88-4e4f-b273-e9e37ea9770b\") " pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:04:50 crc kubenswrapper[4793]: I0127 22:04:50.930897 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mn9n\" (UniqueName: \"kubernetes.io/projected/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-kube-api-access-2mn9n\") pod \"community-operators-lmkbd\" (UID: \"41e3db2d-ae88-4e4f-b273-e9e37ea9770b\") " pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:04:51 crc kubenswrapper[4793]: I0127 22:04:51.032980 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-catalog-content\") pod \"community-operators-lmkbd\" (UID: \"41e3db2d-ae88-4e4f-b273-e9e37ea9770b\") " 
pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:04:51 crc kubenswrapper[4793]: I0127 22:04:51.033665 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-utilities\") pod \"community-operators-lmkbd\" (UID: \"41e3db2d-ae88-4e4f-b273-e9e37ea9770b\") " pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:04:51 crc kubenswrapper[4793]: I0127 22:04:51.033785 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mn9n\" (UniqueName: \"kubernetes.io/projected/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-kube-api-access-2mn9n\") pod \"community-operators-lmkbd\" (UID: \"41e3db2d-ae88-4e4f-b273-e9e37ea9770b\") " pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:04:51 crc kubenswrapper[4793]: I0127 22:04:51.033513 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-catalog-content\") pod \"community-operators-lmkbd\" (UID: \"41e3db2d-ae88-4e4f-b273-e9e37ea9770b\") " pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:04:51 crc kubenswrapper[4793]: I0127 22:04:51.034709 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-utilities\") pod \"community-operators-lmkbd\" (UID: \"41e3db2d-ae88-4e4f-b273-e9e37ea9770b\") " pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:04:51 crc kubenswrapper[4793]: I0127 22:04:51.061451 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mn9n\" (UniqueName: \"kubernetes.io/projected/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-kube-api-access-2mn9n\") pod \"community-operators-lmkbd\" (UID: \"41e3db2d-ae88-4e4f-b273-e9e37ea9770b\") " pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:04:51 crc kubenswrapper[4793]: I0127 22:04:51.125347 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:04:51 crc kubenswrapper[4793]: I0127 22:04:51.825562 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lmkbd"] Jan 27 22:04:52 crc kubenswrapper[4793]: I0127 22:04:52.462316 4793 generic.go:334] "Generic (PLEG): container finished" podID="41e3db2d-ae88-4e4f-b273-e9e37ea9770b" containerID="d28400744e12ff3b9c7dc47626add6cd0879cdbfdb1f6afb48fa1dc5c8e87601" exitCode=0 Jan 27 22:04:52 crc kubenswrapper[4793]: I0127 22:04:52.462884 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lmkbd" event={"ID":"41e3db2d-ae88-4e4f-b273-e9e37ea9770b","Type":"ContainerDied","Data":"d28400744e12ff3b9c7dc47626add6cd0879cdbfdb1f6afb48fa1dc5c8e87601"} Jan 27 22:04:52 crc kubenswrapper[4793]: I0127 22:04:52.462951 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lmkbd" event={"ID":"41e3db2d-ae88-4e4f-b273-e9e37ea9770b","Type":"ContainerStarted","Data":"91485d8998d38c65657c67b824814698bd0196c3a487febd810fc49e385a6212"} Jan 27 22:04:52 crc kubenswrapper[4793]: I0127 22:04:52.466097 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 22:04:54 crc kubenswrapper[4793]: I0127 22:04:54.489708 4793 generic.go:334] "Generic (PLEG): container finished" podID="41e3db2d-ae88-4e4f-b273-e9e37ea9770b" containerID="ca83cd7516f45cd98066ad6bbb57517d985ca3ae0f55a9b30b7d55e6db44560b" exitCode=0 Jan 27 22:04:54 crc kubenswrapper[4793]: I0127 22:04:54.489800 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lmkbd" event={"ID":"41e3db2d-ae88-4e4f-b273-e9e37ea9770b","Type":"ContainerDied","Data":"ca83cd7516f45cd98066ad6bbb57517d985ca3ae0f55a9b30b7d55e6db44560b"} Jan 27 22:04:55 crc kubenswrapper[4793]: I0127 22:04:55.520843 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lmkbd" event={"ID":"41e3db2d-ae88-4e4f-b273-e9e37ea9770b","Type":"ContainerStarted","Data":"1fb2e66664429b60b443aaa80ae9eb157fd4d6cf8c0001dcf2c6d2a2b4d01013"} Jan 27 22:04:55 crc kubenswrapper[4793]: I0127 22:04:55.556206 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lmkbd" podStartSLOduration=3.1181282 podStartE2EDuration="5.556185816s" podCreationTimestamp="2026-01-27 22:04:50 +0000 UTC" firstStartedPulling="2026-01-27 22:04:52.465384764 +0000 UTC m=+7317.855637970" lastFinishedPulling="2026-01-27 22:04:54.90344238 +0000 UTC m=+7320.293695586" observedRunningTime="2026-01-27 22:04:55.551591935 +0000 UTC m=+7320.941845111" watchObservedRunningTime="2026-01-27 22:04:55.556185816 +0000 UTC m=+7320.946438982" Jan 27 22:04:59 crc kubenswrapper[4793]: I0127 22:04:59.803961 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:04:59 crc kubenswrapper[4793]: E0127 22:04:59.805178 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:05:01 crc kubenswrapper[4793]: I0127 22:05:01.126648 4793 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:05:01 crc kubenswrapper[4793]: I0127 22:05:01.127143 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:05:01 crc kubenswrapper[4793]: I0127 22:05:01.213957 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:05:01 crc kubenswrapper[4793]: I0127 22:05:01.895718 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:05:01 crc kubenswrapper[4793]: I0127 22:05:01.946316 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lmkbd"] Jan 27 22:05:03 crc kubenswrapper[4793]: I0127 22:05:03.876092 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lmkbd" podUID="41e3db2d-ae88-4e4f-b273-e9e37ea9770b" containerName="registry-server" containerID="cri-o://1fb2e66664429b60b443aaa80ae9eb157fd4d6cf8c0001dcf2c6d2a2b4d01013" gracePeriod=2 Jan 27 22:05:04 crc kubenswrapper[4793]: I0127 22:05:04.421921 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:05:04 crc kubenswrapper[4793]: I0127 22:05:04.504029 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-utilities\") pod \"41e3db2d-ae88-4e4f-b273-e9e37ea9770b\" (UID: \"41e3db2d-ae88-4e4f-b273-e9e37ea9770b\") " Jan 27 22:05:04 crc kubenswrapper[4793]: I0127 22:05:04.504134 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-catalog-content\") pod \"41e3db2d-ae88-4e4f-b273-e9e37ea9770b\" (UID: \"41e3db2d-ae88-4e4f-b273-e9e37ea9770b\") " Jan 27 22:05:04 crc kubenswrapper[4793]: I0127 22:05:04.504185 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2mn9n\" (UniqueName: \"kubernetes.io/projected/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-kube-api-access-2mn9n\") pod \"41e3db2d-ae88-4e4f-b273-e9e37ea9770b\" (UID: \"41e3db2d-ae88-4e4f-b273-e9e37ea9770b\") " Jan 27 22:05:04 crc kubenswrapper[4793]: I0127 22:05:04.505807 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-utilities" (OuterVolumeSpecName: "utilities") pod "41e3db2d-ae88-4e4f-b273-e9e37ea9770b" (UID: "41e3db2d-ae88-4e4f-b273-e9e37ea9770b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:05:04 crc kubenswrapper[4793]: I0127 22:05:04.526644 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-kube-api-access-2mn9n" (OuterVolumeSpecName: "kube-api-access-2mn9n") pod "41e3db2d-ae88-4e4f-b273-e9e37ea9770b" (UID: "41e3db2d-ae88-4e4f-b273-e9e37ea9770b"). InnerVolumeSpecName "kube-api-access-2mn9n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:05:04 crc kubenswrapper[4793]: I0127 22:05:04.608027 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 22:05:04 crc kubenswrapper[4793]: I0127 22:05:04.608311 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2mn9n\" (UniqueName: \"kubernetes.io/projected/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-kube-api-access-2mn9n\") on node \"crc\" DevicePath \"\"" Jan 27 22:05:04 crc kubenswrapper[4793]: I0127 22:05:04.778993 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "41e3db2d-ae88-4e4f-b273-e9e37ea9770b" (UID: "41e3db2d-ae88-4e4f-b273-e9e37ea9770b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:05:04 crc kubenswrapper[4793]: I0127 22:05:04.983804 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41e3db2d-ae88-4e4f-b273-e9e37ea9770b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 22:05:04 crc kubenswrapper[4793]: I0127 22:05:04.992069 4793 generic.go:334] "Generic (PLEG): container finished" podID="41e3db2d-ae88-4e4f-b273-e9e37ea9770b" containerID="1fb2e66664429b60b443aaa80ae9eb157fd4d6cf8c0001dcf2c6d2a2b4d01013" exitCode=0 Jan 27 22:05:04 crc kubenswrapper[4793]: I0127 22:05:04.992125 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lmkbd" event={"ID":"41e3db2d-ae88-4e4f-b273-e9e37ea9770b","Type":"ContainerDied","Data":"1fb2e66664429b60b443aaa80ae9eb157fd4d6cf8c0001dcf2c6d2a2b4d01013"} Jan 27 22:05:04 crc kubenswrapper[4793]: I0127 22:05:04.992163 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lmkbd" event={"ID":"41e3db2d-ae88-4e4f-b273-e9e37ea9770b","Type":"ContainerDied","Data":"91485d8998d38c65657c67b824814698bd0196c3a487febd810fc49e385a6212"} Jan 27 22:05:04 crc kubenswrapper[4793]: I0127 22:05:04.992179 4793 scope.go:117] "RemoveContainer" containerID="1fb2e66664429b60b443aaa80ae9eb157fd4d6cf8c0001dcf2c6d2a2b4d01013" Jan 27 22:05:04 crc kubenswrapper[4793]: I0127 22:05:04.992304 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lmkbd" Jan 27 22:05:05 crc kubenswrapper[4793]: I0127 22:05:05.023678 4793 scope.go:117] "RemoveContainer" containerID="ca83cd7516f45cd98066ad6bbb57517d985ca3ae0f55a9b30b7d55e6db44560b" Jan 27 22:05:05 crc kubenswrapper[4793]: I0127 22:05:05.028661 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lmkbd"] Jan 27 22:05:05 crc kubenswrapper[4793]: I0127 22:05:05.051049 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lmkbd"] Jan 27 22:05:05 crc kubenswrapper[4793]: I0127 22:05:05.053287 4793 scope.go:117] "RemoveContainer" containerID="d28400744e12ff3b9c7dc47626add6cd0879cdbfdb1f6afb48fa1dc5c8e87601" Jan 27 22:05:05 crc kubenswrapper[4793]: I0127 22:05:05.095366 4793 scope.go:117] "RemoveContainer" containerID="1fb2e66664429b60b443aaa80ae9eb157fd4d6cf8c0001dcf2c6d2a2b4d01013" Jan 27 22:05:05 crc kubenswrapper[4793]: E0127 22:05:05.096171 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fb2e66664429b60b443aaa80ae9eb157fd4d6cf8c0001dcf2c6d2a2b4d01013\": container with ID starting with 1fb2e66664429b60b443aaa80ae9eb157fd4d6cf8c0001dcf2c6d2a2b4d01013 not found: ID does not exist" containerID="1fb2e66664429b60b443aaa80ae9eb157fd4d6cf8c0001dcf2c6d2a2b4d01013" Jan 27 22:05:05 crc kubenswrapper[4793]: I0127 22:05:05.096293 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fb2e66664429b60b443aaa80ae9eb157fd4d6cf8c0001dcf2c6d2a2b4d01013"} err="failed to get container status \"1fb2e66664429b60b443aaa80ae9eb157fd4d6cf8c0001dcf2c6d2a2b4d01013\": rpc error: code = NotFound desc = could not find container \"1fb2e66664429b60b443aaa80ae9eb157fd4d6cf8c0001dcf2c6d2a2b4d01013\": container with ID starting with 1fb2e66664429b60b443aaa80ae9eb157fd4d6cf8c0001dcf2c6d2a2b4d01013 not found: ID does not exist" Jan 27 22:05:05 crc kubenswrapper[4793]: I0127 22:05:05.096377 4793 scope.go:117] "RemoveContainer" containerID="ca83cd7516f45cd98066ad6bbb57517d985ca3ae0f55a9b30b7d55e6db44560b" Jan 27 22:05:05 crc kubenswrapper[4793]: E0127 22:05:05.096957 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca83cd7516f45cd98066ad6bbb57517d985ca3ae0f55a9b30b7d55e6db44560b\": container with ID starting with ca83cd7516f45cd98066ad6bbb57517d985ca3ae0f55a9b30b7d55e6db44560b not found: ID does not exist" containerID="ca83cd7516f45cd98066ad6bbb57517d985ca3ae0f55a9b30b7d55e6db44560b" Jan 27 22:05:05 crc kubenswrapper[4793]: I0127 22:05:05.097039 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca83cd7516f45cd98066ad6bbb57517d985ca3ae0f55a9b30b7d55e6db44560b"} err="failed to get container status \"ca83cd7516f45cd98066ad6bbb57517d985ca3ae0f55a9b30b7d55e6db44560b\": rpc error: code = NotFound desc = could not find container \"ca83cd7516f45cd98066ad6bbb57517d985ca3ae0f55a9b30b7d55e6db44560b\": container with ID starting with ca83cd7516f45cd98066ad6bbb57517d985ca3ae0f55a9b30b7d55e6db44560b not found: ID does not exist" Jan 27 22:05:05 crc kubenswrapper[4793]: I0127 22:05:05.097103 4793 scope.go:117] "RemoveContainer" containerID="d28400744e12ff3b9c7dc47626add6cd0879cdbfdb1f6afb48fa1dc5c8e87601" Jan 27 22:05:05 crc kubenswrapper[4793]: E0127 22:05:05.097610 4793 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"d28400744e12ff3b9c7dc47626add6cd0879cdbfdb1f6afb48fa1dc5c8e87601\": container with ID starting with d28400744e12ff3b9c7dc47626add6cd0879cdbfdb1f6afb48fa1dc5c8e87601 not found: ID does not exist" containerID="d28400744e12ff3b9c7dc47626add6cd0879cdbfdb1f6afb48fa1dc5c8e87601" Jan 27 22:05:05 crc kubenswrapper[4793]: I0127 22:05:05.097659 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d28400744e12ff3b9c7dc47626add6cd0879cdbfdb1f6afb48fa1dc5c8e87601"} err="failed to get container status \"d28400744e12ff3b9c7dc47626add6cd0879cdbfdb1f6afb48fa1dc5c8e87601\": rpc error: code = NotFound desc = could not find container \"d28400744e12ff3b9c7dc47626add6cd0879cdbfdb1f6afb48fa1dc5c8e87601\": container with ID starting with d28400744e12ff3b9c7dc47626add6cd0879cdbfdb1f6afb48fa1dc5c8e87601 not found: ID does not exist" Jan 27 22:05:05 crc kubenswrapper[4793]: I0127 22:05:05.826876 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41e3db2d-ae88-4e4f-b273-e9e37ea9770b" path="/var/lib/kubelet/pods/41e3db2d-ae88-4e4f-b273-e9e37ea9770b/volumes" Jan 27 22:05:12 crc kubenswrapper[4793]: I0127 22:05:12.804434 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:05:12 crc kubenswrapper[4793]: E0127 22:05:12.805503 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:05:26 crc kubenswrapper[4793]: I0127 22:05:26.804041 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:05:27 crc kubenswrapper[4793]: I0127 22:05:27.330462 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd"} Jan 27 22:05:28 crc kubenswrapper[4793]: I0127 22:05:28.243673 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:05:28 crc kubenswrapper[4793]: I0127 22:05:28.244218 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 22:05:28 crc kubenswrapper[4793]: I0127 22:05:28.297366 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0" Jan 27 22:05:28 crc kubenswrapper[4793]: I0127 22:05:28.398786 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0" Jan 27 22:05:30 crc kubenswrapper[4793]: I0127 22:05:30.372261 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd" exitCode=1 Jan 27 22:05:30 crc kubenswrapper[4793]: I0127 22:05:30.373882 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd"} Jan 27 22:05:30 crc kubenswrapper[4793]: I0127 
22:05:30.374522 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd" Jan 27 22:05:30 crc kubenswrapper[4793]: I0127 22:05:30.374930 4793 scope.go:117] "RemoveContainer" containerID="a581e8981b81087407f72e6ef6acf9f361dd92b8908169e2b18719adb7d8ffdd" Jan 27 22:05:30 crc kubenswrapper[4793]: E0127 22:05:30.375334 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:05:31 crc kubenswrapper[4793]: I0127 22:05:31.385839 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd" Jan 27 22:05:31 crc kubenswrapper[4793]: E0127 22:05:31.386901 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:05:33 crc kubenswrapper[4793]: I0127 22:05:33.243091 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 22:05:33 crc kubenswrapper[4793]: I0127 22:05:33.244886 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd" Jan 27 22:05:33 crc kubenswrapper[4793]: E0127 22:05:33.245738 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:05:38 crc kubenswrapper[4793]: I0127 22:05:38.242704 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:05:38 crc kubenswrapper[4793]: I0127 22:05:38.243346 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:05:38 crc kubenswrapper[4793]: I0127 22:05:38.244281 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd" Jan 27 22:05:38 crc kubenswrapper[4793]: E0127 22:05:38.244705 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:05:49 crc kubenswrapper[4793]: I0127 22:05:49.804444 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd" Jan 27 22:05:49 crc kubenswrapper[4793]: E0127 22:05:49.805506 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" 
pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:05:52 crc kubenswrapper[4793]: I0127 22:05:52.754008 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:05:52 crc kubenswrapper[4793]: I0127 22:05:52.756444 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:06:02 crc kubenswrapper[4793]: I0127 22:06:02.803252 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd" Jan 27 22:06:02 crc kubenswrapper[4793]: E0127 22:06:02.803969 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:06:06 crc kubenswrapper[4793]: I0127 22:06:06.498471 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 27 22:06:06 crc kubenswrapper[4793]: I0127 22:06:06.499598 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerName="prometheus" containerID="cri-o://7d36850131a63c8578cfc954d30b5b5af3261ba1543f9c3998557f9d1b782589" gracePeriod=600 Jan 27 22:06:06 crc kubenswrapper[4793]: I0127 22:06:06.499819 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerName="thanos-sidecar" containerID="cri-o://9b81fa38a969610a815769431922e292c17ec191f0c81c0f15e528dbeb35342e" gracePeriod=600 Jan 27 22:06:06 crc kubenswrapper[4793]: I0127 22:06:06.501256 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerName="config-reloader" containerID="cri-o://31bce5819a13bec1ecd73bfb99e884ed3aac050dd87958abc11a1b61cbbfd5be" gracePeriod=600 Jan 27 22:06:06 crc kubenswrapper[4793]: I0127 22:06:06.900206 4793 generic.go:334] "Generic (PLEG): container finished" podID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerID="9b81fa38a969610a815769431922e292c17ec191f0c81c0f15e528dbeb35342e" exitCode=0 Jan 27 22:06:06 crc kubenswrapper[4793]: I0127 22:06:06.900239 4793 generic.go:334] "Generic (PLEG): container finished" podID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerID="7d36850131a63c8578cfc954d30b5b5af3261ba1543f9c3998557f9d1b782589" exitCode=0 Jan 27 22:06:06 crc kubenswrapper[4793]: I0127 22:06:06.900260 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7079beed-67e6-40c8-b8fe-b482c61c0ee5","Type":"ContainerDied","Data":"9b81fa38a969610a815769431922e292c17ec191f0c81c0f15e528dbeb35342e"} Jan 27 22:06:06 crc kubenswrapper[4793]: I0127 22:06:06.900287 
4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7079beed-67e6-40c8-b8fe-b482c61c0ee5","Type":"ContainerDied","Data":"7d36850131a63c8578cfc954d30b5b5af3261ba1543f9c3998557f9d1b782589"} Jan 27 22:06:07 crc kubenswrapper[4793]: I0127 22:06:07.912857 4793 generic.go:334] "Generic (PLEG): container finished" podID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerID="31bce5819a13bec1ecd73bfb99e884ed3aac050dd87958abc11a1b61cbbfd5be" exitCode=0 Jan 27 22:06:07 crc kubenswrapper[4793]: I0127 22:06:07.913184 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7079beed-67e6-40c8-b8fe-b482c61c0ee5","Type":"ContainerDied","Data":"31bce5819a13bec1ecd73bfb99e884ed3aac050dd87958abc11a1b61cbbfd5be"} Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.086255 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.174629 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7079beed-67e6-40c8-b8fe-b482c61c0ee5-tls-assets\") pod \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.174857 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-secret-combined-ca-bundle\") pod \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.174880 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7079beed-67e6-40c8-b8fe-b482c61c0ee5-config-out\") pod \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.174924 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-thanos-prometheus-http-client-file\") pod \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.174975 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config\") pod \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.174994 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.176083 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") pod 
\"7079beed-67e6-40c8-b8fe-b482c61c0ee5\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.176114 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvmhw\" (UniqueName: \"kubernetes.io/projected/7079beed-67e6-40c8-b8fe-b482c61c0ee5-kube-api-access-fvmhw\") pod \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.176157 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-1\") pod \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.176203 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-0\") pod \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.176226 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.176278 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-config\") pod \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.176314 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-2\") pod \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\" (UID: \"7079beed-67e6-40c8-b8fe-b482c61c0ee5\") " Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.177207 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "7079beed-67e6-40c8-b8fe-b482c61c0ee5" (UID: "7079beed-67e6-40c8-b8fe-b482c61c0ee5"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.177329 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "7079beed-67e6-40c8-b8fe-b482c61c0ee5" (UID: "7079beed-67e6-40c8-b8fe-b482c61c0ee5"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.177710 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "7079beed-67e6-40c8-b8fe-b482c61c0ee5" (UID: "7079beed-67e6-40c8-b8fe-b482c61c0ee5"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.182206 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "7079beed-67e6-40c8-b8fe-b482c61c0ee5" (UID: "7079beed-67e6-40c8-b8fe-b482c61c0ee5"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.182839 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7079beed-67e6-40c8-b8fe-b482c61c0ee5-config-out" (OuterVolumeSpecName: "config-out") pod "7079beed-67e6-40c8-b8fe-b482c61c0ee5" (UID: "7079beed-67e6-40c8-b8fe-b482c61c0ee5"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.183868 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d") pod "7079beed-67e6-40c8-b8fe-b482c61c0ee5" (UID: "7079beed-67e6-40c8-b8fe-b482c61c0ee5"). InnerVolumeSpecName "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.188719 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d") pod "7079beed-67e6-40c8-b8fe-b482c61c0ee5" (UID: "7079beed-67e6-40c8-b8fe-b482c61c0ee5"). InnerVolumeSpecName "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.189151 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-secret-combined-ca-bundle" (OuterVolumeSpecName: "secret-combined-ca-bundle") pod "7079beed-67e6-40c8-b8fe-b482c61c0ee5" (UID: "7079beed-67e6-40c8-b8fe-b482c61c0ee5"). InnerVolumeSpecName "secret-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.191607 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7079beed-67e6-40c8-b8fe-b482c61c0ee5-kube-api-access-fvmhw" (OuterVolumeSpecName: "kube-api-access-fvmhw") pod "7079beed-67e6-40c8-b8fe-b482c61c0ee5" (UID: "7079beed-67e6-40c8-b8fe-b482c61c0ee5"). InnerVolumeSpecName "kube-api-access-fvmhw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.191709 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7079beed-67e6-40c8-b8fe-b482c61c0ee5-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "7079beed-67e6-40c8-b8fe-b482c61c0ee5" (UID: "7079beed-67e6-40c8-b8fe-b482c61c0ee5"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.200780 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-config" (OuterVolumeSpecName: "config") pod "7079beed-67e6-40c8-b8fe-b482c61c0ee5" (UID: "7079beed-67e6-40c8-b8fe-b482c61c0ee5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.250236 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "7079beed-67e6-40c8-b8fe-b482c61c0ee5" (UID: "7079beed-67e6-40c8-b8fe-b482c61c0ee5"). InnerVolumeSpecName "pvc-a92de854-8671-4eba-9b5d-1a749083f30b". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.284672 4793 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\"" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.285066 4793 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7079beed-67e6-40c8-b8fe-b482c61c0ee5-tls-assets\") on node \"crc\" DevicePath \"\"" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.285169 4793 reconciler_common.go:293] "Volume detached for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-secret-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.285257 4793 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7079beed-67e6-40c8-b8fe-b482c61c0ee5-config-out\") on node \"crc\" DevicePath \"\"" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.285333 4793 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.285428 4793 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") on node \"crc\" DevicePath \"\"" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.285096 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config" (OuterVolumeSpecName: "web-config") pod "7079beed-67e6-40c8-b8fe-b482c61c0ee5" (UID: "7079beed-67e6-40c8-b8fe-b482c61c0ee5"). InnerVolumeSpecName "web-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.285639 4793 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-a92de854-8671-4eba-9b5d-1a749083f30b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") on node \"crc\" " Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.285716 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fvmhw\" (UniqueName: \"kubernetes.io/projected/7079beed-67e6-40c8-b8fe-b482c61c0ee5-kube-api-access-fvmhw\") on node \"crc\" DevicePath \"\"" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.285781 4793 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\"" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.285848 4793 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7079beed-67e6-40c8-b8fe-b482c61c0ee5-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.285933 4793 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") on node \"crc\" DevicePath \"\"" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.285999 4793 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-config\") on node \"crc\" DevicePath \"\"" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.324300 4793 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.324740 4793 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-a92de854-8671-4eba-9b5d-1a749083f30b" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b") on node "crc" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.387809 4793 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7079beed-67e6-40c8-b8fe-b482c61c0ee5-web-config\") on node \"crc\" DevicePath \"\"" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.387848 4793 reconciler_common.go:293] "Volume detached for volume \"pvc-a92de854-8671-4eba-9b5d-1a749083f30b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") on node \"crc\" DevicePath \"\"" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.928591 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7079beed-67e6-40c8-b8fe-b482c61c0ee5","Type":"ContainerDied","Data":"befe29c51bc75645a18d0f70ec4136e3de44b5e67e7201c399ae500cc75092a7"} Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.928678 4793 scope.go:117] "RemoveContainer" containerID="9b81fa38a969610a815769431922e292c17ec191f0c81c0f15e528dbeb35342e" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.929712 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.967157 4793 scope.go:117] "RemoveContainer" containerID="31bce5819a13bec1ecd73bfb99e884ed3aac050dd87958abc11a1b61cbbfd5be" Jan 27 22:06:08 crc kubenswrapper[4793]: I0127 22:06:08.998436 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.001211 4793 scope.go:117] "RemoveContainer" containerID="7d36850131a63c8578cfc954d30b5b5af3261ba1543f9c3998557f9d1b782589" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.022670 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.042684 4793 scope.go:117] "RemoveContainer" containerID="4c1a3bf5c396e29ae30cb20590b0929da652bede8fde41fa9b99919957cf9d28" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.120743 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 27 22:06:09 crc kubenswrapper[4793]: E0127 22:06:09.121557 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerName="init-config-reloader" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.121683 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerName="init-config-reloader" Jan 27 22:06:09 crc kubenswrapper[4793]: E0127 22:06:09.121786 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41e3db2d-ae88-4e4f-b273-e9e37ea9770b" containerName="extract-utilities" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.121863 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="41e3db2d-ae88-4e4f-b273-e9e37ea9770b" containerName="extract-utilities" Jan 27 22:06:09 crc kubenswrapper[4793]: E0127 22:06:09.121944 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41e3db2d-ae88-4e4f-b273-e9e37ea9770b" 
containerName="extract-content" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.122023 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="41e3db2d-ae88-4e4f-b273-e9e37ea9770b" containerName="extract-content" Jan 27 22:06:09 crc kubenswrapper[4793]: E0127 22:06:09.122102 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41e3db2d-ae88-4e4f-b273-e9e37ea9770b" containerName="registry-server" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.122183 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="41e3db2d-ae88-4e4f-b273-e9e37ea9770b" containerName="registry-server" Jan 27 22:06:09 crc kubenswrapper[4793]: E0127 22:06:09.122379 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerName="config-reloader" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.122448 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerName="config-reloader" Jan 27 22:06:09 crc kubenswrapper[4793]: E0127 22:06:09.122517 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerName="prometheus" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.122603 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerName="prometheus" Jan 27 22:06:09 crc kubenswrapper[4793]: E0127 22:06:09.122673 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerName="thanos-sidecar" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.122731 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerName="thanos-sidecar" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.122993 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerName="thanos-sidecar" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.123097 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="41e3db2d-ae88-4e4f-b273-e9e37ea9770b" containerName="registry-server" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.123212 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerName="prometheus" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.123294 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" containerName="config-reloader" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.125817 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.128997 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.129628 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.129915 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.131754 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-h8vj4" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.132136 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.132387 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.132708 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.307103 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.309274 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.411146 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-config\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.411269 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8230a67d-9b25-4098-8bc7-a934934d4084-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.411299 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/8230a67d-9b25-4098-8bc7-a934934d4084-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.411338 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8230a67d-9b25-4098-8bc7-a934934d4084-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.411813 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a92de854-8671-4eba-9b5d-1a749083f30b\" 
(UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.412043 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.412296 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8230a67d-9b25-4098-8bc7-a934934d4084-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.412350 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.412419 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/8230a67d-9b25-4098-8bc7-a934934d4084-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.412480 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.412516 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.412666 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smdlp\" (UniqueName: \"kubernetes.io/projected/8230a67d-9b25-4098-8bc7-a934934d4084-kube-api-access-smdlp\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.412769 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.515074 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a92de854-8671-4eba-9b5d-1a749083f30b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.515151 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.515208 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8230a67d-9b25-4098-8bc7-a934934d4084-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.515229 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.515249 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/8230a67d-9b25-4098-8bc7-a934934d4084-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.515274 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.515292 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.515317 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smdlp\" (UniqueName: 
\"kubernetes.io/projected/8230a67d-9b25-4098-8bc7-a934934d4084-kube-api-access-smdlp\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.515341 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.515373 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-config\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.515425 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8230a67d-9b25-4098-8bc7-a934934d4084-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.515442 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/8230a67d-9b25-4098-8bc7-a934934d4084-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.515459 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8230a67d-9b25-4098-8bc7-a934934d4084-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.516529 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/8230a67d-9b25-4098-8bc7-a934934d4084-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.517036 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/8230a67d-9b25-4098-8bc7-a934934d4084-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.517520 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8230a67d-9b25-4098-8bc7-a934934d4084-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc 
kubenswrapper[4793]: I0127 22:06:09.518827 4793 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.518860 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a92de854-8671-4eba-9b5d-1a749083f30b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8312f000923a56e203c1e13376862fe23daea3f78fa537b754b51784691fd00c/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.521067 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.522533 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-config\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.522682 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8230a67d-9b25-4098-8bc7-a934934d4084-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.523395 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8230a67d-9b25-4098-8bc7-a934934d4084-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.524212 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.525145 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.525595 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " 
pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.534327 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8230a67d-9b25-4098-8bc7-a934934d4084-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.541417 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smdlp\" (UniqueName: \"kubernetes.io/projected/8230a67d-9b25-4098-8bc7-a934934d4084-kube-api-access-smdlp\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.570684 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a92de854-8671-4eba-9b5d-1a749083f30b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a92de854-8671-4eba-9b5d-1a749083f30b\") pod \"prometheus-metric-storage-0\" (UID: \"8230a67d-9b25-4098-8bc7-a934934d4084\") " pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.622687 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:09 crc kubenswrapper[4793]: I0127 22:06:09.821019 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7079beed-67e6-40c8-b8fe-b482c61c0ee5" path="/var/lib/kubelet/pods/7079beed-67e6-40c8-b8fe-b482c61c0ee5/volumes" Jan 27 22:06:10 crc kubenswrapper[4793]: I0127 22:06:10.173828 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 27 22:06:10 crc kubenswrapper[4793]: I0127 22:06:10.953075 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8230a67d-9b25-4098-8bc7-a934934d4084","Type":"ContainerStarted","Data":"5672f5b6fee8f1d6dc33d14ca99fc62a9d529fbd23fa74dcf719043bd9cdca79"} Jan 27 22:06:14 crc kubenswrapper[4793]: I0127 22:06:14.998432 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8230a67d-9b25-4098-8bc7-a934934d4084","Type":"ContainerStarted","Data":"614eb9b32a102145eb6e019608b4793aecb038784c23e8b900a4d61a3dbbc930"} Jan 27 22:06:17 crc kubenswrapper[4793]: I0127 22:06:17.804068 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd" Jan 27 22:06:17 crc kubenswrapper[4793]: E0127 22:06:17.804709 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:06:21 crc kubenswrapper[4793]: I0127 22:06:21.847787 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tmgtx"] Jan 27 22:06:21 crc kubenswrapper[4793]: I0127 22:06:21.852469 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tmgtx" Jan 27 22:06:21 crc kubenswrapper[4793]: I0127 22:06:21.861208 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tmgtx"] Jan 27 22:06:21 crc kubenswrapper[4793]: I0127 22:06:21.933875 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfjft\" (UniqueName: \"kubernetes.io/projected/74bb269f-0cd8-448c-b71a-577d487b81f5-kube-api-access-mfjft\") pod \"redhat-operators-tmgtx\" (UID: \"74bb269f-0cd8-448c-b71a-577d487b81f5\") " pod="openshift-marketplace/redhat-operators-tmgtx" Jan 27 22:06:21 crc kubenswrapper[4793]: I0127 22:06:21.934245 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74bb269f-0cd8-448c-b71a-577d487b81f5-catalog-content\") pod \"redhat-operators-tmgtx\" (UID: \"74bb269f-0cd8-448c-b71a-577d487b81f5\") " pod="openshift-marketplace/redhat-operators-tmgtx" Jan 27 22:06:21 crc kubenswrapper[4793]: I0127 22:06:21.934352 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74bb269f-0cd8-448c-b71a-577d487b81f5-utilities\") pod \"redhat-operators-tmgtx\" (UID: \"74bb269f-0cd8-448c-b71a-577d487b81f5\") " pod="openshift-marketplace/redhat-operators-tmgtx" Jan 27 22:06:22 crc kubenswrapper[4793]: I0127 22:06:22.037083 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfjft\" (UniqueName: \"kubernetes.io/projected/74bb269f-0cd8-448c-b71a-577d487b81f5-kube-api-access-mfjft\") pod \"redhat-operators-tmgtx\" (UID: \"74bb269f-0cd8-448c-b71a-577d487b81f5\") " pod="openshift-marketplace/redhat-operators-tmgtx" Jan 27 22:06:22 crc kubenswrapper[4793]: I0127 22:06:22.037220 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74bb269f-0cd8-448c-b71a-577d487b81f5-catalog-content\") pod \"redhat-operators-tmgtx\" (UID: \"74bb269f-0cd8-448c-b71a-577d487b81f5\") " pod="openshift-marketplace/redhat-operators-tmgtx" Jan 27 22:06:22 crc kubenswrapper[4793]: I0127 22:06:22.037257 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74bb269f-0cd8-448c-b71a-577d487b81f5-utilities\") pod \"redhat-operators-tmgtx\" (UID: \"74bb269f-0cd8-448c-b71a-577d487b81f5\") " pod="openshift-marketplace/redhat-operators-tmgtx" Jan 27 22:06:22 crc kubenswrapper[4793]: I0127 22:06:22.037829 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74bb269f-0cd8-448c-b71a-577d487b81f5-catalog-content\") pod \"redhat-operators-tmgtx\" (UID: \"74bb269f-0cd8-448c-b71a-577d487b81f5\") " pod="openshift-marketplace/redhat-operators-tmgtx" Jan 27 22:06:22 crc kubenswrapper[4793]: I0127 22:06:22.037891 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74bb269f-0cd8-448c-b71a-577d487b81f5-utilities\") pod \"redhat-operators-tmgtx\" (UID: \"74bb269f-0cd8-448c-b71a-577d487b81f5\") " pod="openshift-marketplace/redhat-operators-tmgtx" Jan 27 22:06:22 crc kubenswrapper[4793]: I0127 22:06:22.079527 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-mfjft\" (UniqueName: \"kubernetes.io/projected/74bb269f-0cd8-448c-b71a-577d487b81f5-kube-api-access-mfjft\") pod \"redhat-operators-tmgtx\" (UID: \"74bb269f-0cd8-448c-b71a-577d487b81f5\") " pod="openshift-marketplace/redhat-operators-tmgtx" Jan 27 22:06:22 crc kubenswrapper[4793]: I0127 22:06:22.191075 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tmgtx" Jan 27 22:06:22 crc kubenswrapper[4793]: I0127 22:06:22.698490 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tmgtx"] Jan 27 22:06:22 crc kubenswrapper[4793]: W0127 22:06:22.726740 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod74bb269f_0cd8_448c_b71a_577d487b81f5.slice/crio-090725aedcc2642d6e53a44679ab3de499a3f317520439013e69a028496cbd1b WatchSource:0}: Error finding container 090725aedcc2642d6e53a44679ab3de499a3f317520439013e69a028496cbd1b: Status 404 returned error can't find the container with id 090725aedcc2642d6e53a44679ab3de499a3f317520439013e69a028496cbd1b Jan 27 22:06:22 crc kubenswrapper[4793]: I0127 22:06:22.753409 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:06:22 crc kubenswrapper[4793]: I0127 22:06:22.753464 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:06:23 crc kubenswrapper[4793]: I0127 22:06:23.082168 4793 generic.go:334] "Generic (PLEG): container finished" podID="74bb269f-0cd8-448c-b71a-577d487b81f5" containerID="e290e4e2724902cf62f8bd61764ce0e79fc0a72dec4f9d4c885f3964a0110dfc" exitCode=0 Jan 27 22:06:23 crc kubenswrapper[4793]: I0127 22:06:23.082217 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tmgtx" event={"ID":"74bb269f-0cd8-448c-b71a-577d487b81f5","Type":"ContainerDied","Data":"e290e4e2724902cf62f8bd61764ce0e79fc0a72dec4f9d4c885f3964a0110dfc"} Jan 27 22:06:23 crc kubenswrapper[4793]: I0127 22:06:23.082247 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tmgtx" event={"ID":"74bb269f-0cd8-448c-b71a-577d487b81f5","Type":"ContainerStarted","Data":"090725aedcc2642d6e53a44679ab3de499a3f317520439013e69a028496cbd1b"} Jan 27 22:06:24 crc kubenswrapper[4793]: I0127 22:06:24.097905 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tmgtx" event={"ID":"74bb269f-0cd8-448c-b71a-577d487b81f5","Type":"ContainerStarted","Data":"1494a7f626fcec4b90b2a163ed761b2ad0fd0590347e71c33cac88c50a9db41e"} Jan 27 22:06:24 crc kubenswrapper[4793]: I0127 22:06:24.103933 4793 generic.go:334] "Generic (PLEG): container finished" podID="8230a67d-9b25-4098-8bc7-a934934d4084" containerID="614eb9b32a102145eb6e019608b4793aecb038784c23e8b900a4d61a3dbbc930" exitCode=0 Jan 27 22:06:24 crc kubenswrapper[4793]: I0127 22:06:24.103966 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"8230a67d-9b25-4098-8bc7-a934934d4084","Type":"ContainerDied","Data":"614eb9b32a102145eb6e019608b4793aecb038784c23e8b900a4d61a3dbbc930"} Jan 27 22:06:25 crc kubenswrapper[4793]: I0127 22:06:25.117031 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8230a67d-9b25-4098-8bc7-a934934d4084","Type":"ContainerStarted","Data":"f2a9d98573753ae26a4e074a52e0b5a8a815699a7b8ba7eeb4c6bada149a4b1e"} Jan 27 22:06:27 crc kubenswrapper[4793]: I0127 22:06:27.141313 4793 generic.go:334] "Generic (PLEG): container finished" podID="74bb269f-0cd8-448c-b71a-577d487b81f5" containerID="1494a7f626fcec4b90b2a163ed761b2ad0fd0590347e71c33cac88c50a9db41e" exitCode=0 Jan 27 22:06:27 crc kubenswrapper[4793]: I0127 22:06:27.141369 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tmgtx" event={"ID":"74bb269f-0cd8-448c-b71a-577d487b81f5","Type":"ContainerDied","Data":"1494a7f626fcec4b90b2a163ed761b2ad0fd0590347e71c33cac88c50a9db41e"} Jan 27 22:06:28 crc kubenswrapper[4793]: I0127 22:06:28.161118 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tmgtx" event={"ID":"74bb269f-0cd8-448c-b71a-577d487b81f5","Type":"ContainerStarted","Data":"aea0e34b2590fa43a5f81aef6b08c56ebad5c252d5e3a86c539035ecac41dbd6"} Jan 27 22:06:28 crc kubenswrapper[4793]: I0127 22:06:28.190257 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tmgtx" podStartSLOduration=2.745683465 podStartE2EDuration="7.190224718s" podCreationTimestamp="2026-01-27 22:06:21 +0000 UTC" firstStartedPulling="2026-01-27 22:06:23.084498985 +0000 UTC m=+7408.474752161" lastFinishedPulling="2026-01-27 22:06:27.529040228 +0000 UTC m=+7412.919293414" observedRunningTime="2026-01-27 22:06:28.182716966 +0000 UTC m=+7413.572970152" watchObservedRunningTime="2026-01-27 22:06:28.190224718 +0000 UTC m=+7413.580477904" Jan 27 22:06:28 crc kubenswrapper[4793]: I0127 22:06:28.804022 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd" Jan 27 22:06:28 crc kubenswrapper[4793]: E0127 22:06:28.804348 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:06:30 crc kubenswrapper[4793]: I0127 22:06:30.185049 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8230a67d-9b25-4098-8bc7-a934934d4084","Type":"ContainerStarted","Data":"d17ce76ba9978cbcd6c457575f1d9740c8ca03721dcc79599297cca3172a6a45"} Jan 27 22:06:30 crc kubenswrapper[4793]: I0127 22:06:30.185495 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8230a67d-9b25-4098-8bc7-a934934d4084","Type":"ContainerStarted","Data":"50dad45602c83e891247b3a6d4968d4d28151199573499f38dad2680495989f5"} Jan 27 22:06:30 crc kubenswrapper[4793]: I0127 22:06:30.226755 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=21.22672756 podStartE2EDuration="21.22672756s" podCreationTimestamp="2026-01-27 22:06:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 
+0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 22:06:30.215009587 +0000 UTC m=+7415.605262763" watchObservedRunningTime="2026-01-27 22:06:30.22672756 +0000 UTC m=+7415.616980716" Jan 27 22:06:32 crc kubenswrapper[4793]: I0127 22:06:32.193142 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tmgtx" Jan 27 22:06:32 crc kubenswrapper[4793]: I0127 22:06:32.194127 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tmgtx" Jan 27 22:06:33 crc kubenswrapper[4793]: I0127 22:06:33.262880 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tmgtx" podUID="74bb269f-0cd8-448c-b71a-577d487b81f5" containerName="registry-server" probeResult="failure" output=< Jan 27 22:06:33 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s Jan 27 22:06:33 crc kubenswrapper[4793]: > Jan 27 22:06:34 crc kubenswrapper[4793]: I0127 22:06:34.622872 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:39 crc kubenswrapper[4793]: I0127 22:06:39.623426 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:39 crc kubenswrapper[4793]: I0127 22:06:39.633200 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:40 crc kubenswrapper[4793]: I0127 22:06:40.302106 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 27 22:06:40 crc kubenswrapper[4793]: I0127 22:06:40.805663 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd" Jan 27 22:06:40 crc kubenswrapper[4793]: E0127 22:06:40.806641 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:06:43 crc kubenswrapper[4793]: I0127 22:06:43.254916 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tmgtx" podUID="74bb269f-0cd8-448c-b71a-577d487b81f5" containerName="registry-server" probeResult="failure" output=< Jan 27 22:06:43 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s Jan 27 22:06:43 crc kubenswrapper[4793]: > Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.676995 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.682072 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.683712 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.687174 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.687423 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.687632 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.692512 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-ltsfq" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.778981 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7c64ec91-a07a-470e-a490-2ad9c6a06248-config-data\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.779039 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7c64ec91-a07a-470e-a490-2ad9c6a06248-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.779113 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.882036 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.882105 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/7c64ec91-a07a-470e-a490-2ad9c6a06248-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.882169 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/7c64ec91-a07a-470e-a490-2ad9c6a06248-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.882382 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-px2q2\" (UniqueName: \"kubernetes.io/projected/7c64ec91-a07a-470e-a490-2ad9c6a06248-kube-api-access-px2q2\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.882528 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.882636 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.882672 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7c64ec91-a07a-470e-a490-2ad9c6a06248-config-data\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.882696 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.882717 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7c64ec91-a07a-470e-a490-2ad9c6a06248-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.883975 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7c64ec91-a07a-470e-a490-2ad9c6a06248-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.884709 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7c64ec91-a07a-470e-a490-2ad9c6a06248-config-data\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.889301 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.983968 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-px2q2\" (UniqueName: \"kubernetes.io/projected/7c64ec91-a07a-470e-a490-2ad9c6a06248-kube-api-access-px2q2\") pod \"tempest-tests-tempest\" (UID: 
\"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.984337 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.984364 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.984388 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.984431 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/7c64ec91-a07a-470e-a490-2ad9c6a06248-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.984470 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/7c64ec91-a07a-470e-a490-2ad9c6a06248-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.984644 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.984922 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/7c64ec91-a07a-470e-a490-2ad9c6a06248-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.985068 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/7c64ec91-a07a-470e-a490-2ad9c6a06248-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc kubenswrapper[4793]: I0127 22:06:48.989635 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:48 crc 
kubenswrapper[4793]: I0127 22:06:48.990177 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:49 crc kubenswrapper[4793]: I0127 22:06:49.011780 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-px2q2\" (UniqueName: \"kubernetes.io/projected/7c64ec91-a07a-470e-a490-2ad9c6a06248-kube-api-access-px2q2\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:49 crc kubenswrapper[4793]: I0127 22:06:49.022502 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") " pod="openstack/tempest-tests-tempest" Jan 27 22:06:49 crc kubenswrapper[4793]: I0127 22:06:49.065013 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 27 22:06:49 crc kubenswrapper[4793]: I0127 22:06:49.568353 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Jan 27 22:06:49 crc kubenswrapper[4793]: W0127 22:06:49.570924 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7c64ec91_a07a_470e_a490_2ad9c6a06248.slice/crio-632d895459df67cd4eab9e1566fefc9d12b6a4225edbca482b10cb2ef8c0249c WatchSource:0}: Error finding container 632d895459df67cd4eab9e1566fefc9d12b6a4225edbca482b10cb2ef8c0249c: Status 404 returned error can't find the container with id 632d895459df67cd4eab9e1566fefc9d12b6a4225edbca482b10cb2ef8c0249c Jan 27 22:06:50 crc kubenswrapper[4793]: I0127 22:06:50.421354 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"7c64ec91-a07a-470e-a490-2ad9c6a06248","Type":"ContainerStarted","Data":"632d895459df67cd4eab9e1566fefc9d12b6a4225edbca482b10cb2ef8c0249c"} Jan 27 22:06:52 crc kubenswrapper[4793]: I0127 22:06:52.257455 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tmgtx" Jan 27 22:06:52 crc kubenswrapper[4793]: I0127 22:06:52.336016 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tmgtx" Jan 27 22:06:52 crc kubenswrapper[4793]: I0127 22:06:52.753937 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:06:52 crc kubenswrapper[4793]: I0127 22:06:52.754026 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:06:52 crc kubenswrapper[4793]: I0127 22:06:52.754109 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
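The mount sequence above shows the kubelet's volume reconciler walking every volume of tempest-tests-tempest through the same phases: VerifyControllerAttachedVolume, then MountVolume.MountDevice for device-backed volumes only (here the local PV, staged at /mnt/openstack/pv02), then a per-pod MountVolume.SetUp. The Go sketch below is a toy model of that ordering with the volume set reconstructed from the log entries; the type and helper names are illustrative and are not kubelet's volumemanager API.

// volume_phases.go: toy model of the reconciler ordering logged above.
// Illustrative only; the real logic lives in kubelet's volumemanager
// reconciler and operationexecutor packages, not in types like these.
package main

import "fmt"

type volume struct {
	name        string
	plugin      string // as in the UniqueName prefix: configmap, secret, empty-dir, ...
	needsDevice bool   // only device-backed volumes log MountVolume.MountDevice
}

func reconcile(pod string, vols []volume) {
	for _, v := range vols {
		fmt.Printf("VerifyControllerAttachedVolume started for %q (%s) pod=%s\n", v.name, v.plugin, pod)
	}
	for _, v := range vols {
		if v.needsDevice {
			fmt.Printf("MountVolume.MountDevice succeeded for %q\n", v.name)
		}
		fmt.Printf("MountVolume.SetUp succeeded for %q\n", v.name)
	}
}

func main() {
	// Volume set reconstructed from the tempest-tests-tempest entries above.
	reconcile("openstack/tempest-tests-tempest", []volume{
		{"config-data", "configmap", false},
		{"openstack-config", "configmap", false},
		{"openstack-config-secret", "secret", false},
		{"ssh-key", "secret", false},
		{"ca-certs", "secret", false},
		{"test-operator-ephemeral-workdir", "empty-dir", false},
		{"test-operator-ephemeral-temporary", "empty-dir", false},
		{"kube-api-access-px2q2", "projected", false},
		{"local-storage02-crc", "local-volume", true}, // staged at /mnt/openstack/pv02
	})
}

In the real log the SetUp completions interleave rather than run in list order: the config maps finish first (22:06:48.883975) while the projected token and the local PV finish last (22:06:49.011780 and 22:06:49.022502).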
pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 22:06:52 crc kubenswrapper[4793]: I0127 22:06:52.755139 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 22:06:52 crc kubenswrapper[4793]: I0127 22:06:52.755210 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8" gracePeriod=600 Jan 27 22:06:52 crc kubenswrapper[4793]: I0127 22:06:52.803696 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd" Jan 27 22:06:52 crc kubenswrapper[4793]: E0127 22:06:52.803977 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:06:53 crc kubenswrapper[4793]: I0127 22:06:53.033037 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tmgtx"] Jan 27 22:06:53 crc kubenswrapper[4793]: I0127 22:06:53.457855 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8" exitCode=0 Jan 27 22:06:53 crc kubenswrapper[4793]: I0127 22:06:53.458473 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tmgtx" podUID="74bb269f-0cd8-448c-b71a-577d487b81f5" containerName="registry-server" containerID="cri-o://aea0e34b2590fa43a5f81aef6b08c56ebad5c252d5e3a86c539035ecac41dbd6" gracePeriod=2 Jan 27 22:06:53 crc kubenswrapper[4793]: I0127 22:06:53.458073 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"} Jan 27 22:06:53 crc kubenswrapper[4793]: I0127 22:06:53.458581 4793 scope.go:117] "RemoveContainer" containerID="92e6abc5aed66b383ba3361f29dbe5a66e4e4938ead003c7db11bf8cd34cc5a4" Jan 27 22:06:53 crc kubenswrapper[4793]: E0127 22:06:53.618818 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:06:54 crc kubenswrapper[4793]: I0127 22:06:54.475488 4793 generic.go:334] "Generic (PLEG): container finished" podID="74bb269f-0cd8-448c-b71a-577d487b81f5" containerID="aea0e34b2590fa43a5f81aef6b08c56ebad5c252d5e3a86c539035ecac41dbd6" exitCode=0 Jan 27 
Jan 27 22:06:54 crc kubenswrapper[4793]: I0127 22:06:54.475488 4793 generic.go:334] "Generic (PLEG): container finished" podID="74bb269f-0cd8-448c-b71a-577d487b81f5" containerID="aea0e34b2590fa43a5f81aef6b08c56ebad5c252d5e3a86c539035ecac41dbd6" exitCode=0
Jan 27 22:06:54 crc kubenswrapper[4793]: I0127 22:06:54.475601 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tmgtx" event={"ID":"74bb269f-0cd8-448c-b71a-577d487b81f5","Type":"ContainerDied","Data":"aea0e34b2590fa43a5f81aef6b08c56ebad5c252d5e3a86c539035ecac41dbd6"}
Jan 27 22:06:54 crc kubenswrapper[4793]: I0127 22:06:54.480147 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"
Jan 27 22:06:54 crc kubenswrapper[4793]: E0127 22:06:54.480764 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.149635 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tmgtx"
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.157517 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.203997 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74bb269f-0cd8-448c-b71a-577d487b81f5-catalog-content\") pod \"74bb269f-0cd8-448c-b71a-577d487b81f5\" (UID: \"74bb269f-0cd8-448c-b71a-577d487b81f5\") "
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.204305 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74bb269f-0cd8-448c-b71a-577d487b81f5-utilities\") pod \"74bb269f-0cd8-448c-b71a-577d487b81f5\" (UID: \"74bb269f-0cd8-448c-b71a-577d487b81f5\") "
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.204405 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mfjft\" (UniqueName: \"kubernetes.io/projected/74bb269f-0cd8-448c-b71a-577d487b81f5-kube-api-access-mfjft\") pod \"74bb269f-0cd8-448c-b71a-577d487b81f5\" (UID: \"74bb269f-0cd8-448c-b71a-577d487b81f5\") "
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.210077 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74bb269f-0cd8-448c-b71a-577d487b81f5-kube-api-access-mfjft" (OuterVolumeSpecName: "kube-api-access-mfjft") pod "74bb269f-0cd8-448c-b71a-577d487b81f5" (UID: "74bb269f-0cd8-448c-b71a-577d487b81f5"). InnerVolumeSpecName "kube-api-access-mfjft". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.221309 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74bb269f-0cd8-448c-b71a-577d487b81f5-utilities" (OuterVolumeSpecName: "utilities") pod "74bb269f-0cd8-448c-b71a-577d487b81f5" (UID: "74bb269f-0cd8-448c-b71a-577d487b81f5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.328585 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74bb269f-0cd8-448c-b71a-577d487b81f5-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.329406 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mfjft\" (UniqueName: \"kubernetes.io/projected/74bb269f-0cd8-448c-b71a-577d487b81f5-kube-api-access-mfjft\") on node \"crc\" DevicePath \"\""
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.333367 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74bb269f-0cd8-448c-b71a-577d487b81f5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "74bb269f-0cd8-448c-b71a-577d487b81f5" (UID: "74bb269f-0cd8-448c-b71a-577d487b81f5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.430996 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74bb269f-0cd8-448c-b71a-577d487b81f5-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.590591 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tmgtx" event={"ID":"74bb269f-0cd8-448c-b71a-577d487b81f5","Type":"ContainerDied","Data":"090725aedcc2642d6e53a44679ab3de499a3f317520439013e69a028496cbd1b"}
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.590671 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tmgtx"
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.591074 4793 scope.go:117] "RemoveContainer" containerID="aea0e34b2590fa43a5f81aef6b08c56ebad5c252d5e3a86c539035ecac41dbd6"
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.617628 4793 scope.go:117] "RemoveContainer" containerID="1494a7f626fcec4b90b2a163ed761b2ad0fd0590347e71c33cac88c50a9db41e"
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.650472 4793 scope.go:117] "RemoveContainer" containerID="e290e4e2724902cf62f8bd61764ce0e79fc0a72dec4f9d4c885f3964a0110dfc"
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.651039 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tmgtx"]
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.665930 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tmgtx"]
Jan 27 22:07:01 crc kubenswrapper[4793]: I0127 22:07:01.830379 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74bb269f-0cd8-448c-b71a-577d487b81f5" path="/var/lib/kubelet/pods/74bb269f-0cd8-448c-b71a-577d487b81f5/volumes"
Jan 27 22:07:02 crc kubenswrapper[4793]: I0127 22:07:02.602525 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"7c64ec91-a07a-470e-a490-2ad9c6a06248","Type":"ContainerStarted","Data":"fd2cf3bb7cf77a00f7145e931525a9f1cc5621f2673ecf530b5f46b0aded5fd0"}
Jan 27 22:07:02 crc kubenswrapper[4793]: I0127 22:07:02.641144 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=4.062369654 podStartE2EDuration="15.641082636s" podCreationTimestamp="2026-01-27 22:06:47 +0000 UTC" firstStartedPulling="2026-01-27 22:06:49.573914422 +0000 UTC m=+7434.964167588" lastFinishedPulling="2026-01-27 22:07:01.152627394 +0000 UTC m=+7446.542880570" observedRunningTime="2026-01-27 22:07:02.640313628 +0000 UTC m=+7448.030566784" watchObservedRunningTime="2026-01-27 22:07:02.641082636 +0000 UTC m=+7448.031335792"
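The pod_startup_latency_tracker entry above encodes a small calculation worth making explicit: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, while podStartSLOduration additionally subtracts the image-pull window, and the logged figures are consistent with that window being taken from the monotonic m=+... readings (7446.542880570 - 7434.964167588 ≈ 11.579s, leaving 4.062369654s of the 15.641082636s total). The sketch below reproduces both numbers; the subtraction rule is inferred from the logged values rather than quoted from the tracker's source.

// slo_math.go: reproduce both durations from the entry above. Assumption,
// consistent with the logged figures: E2E = watchObservedRunningTime -
// podCreationTimestamp on the wall clock; the SLO duration subtracts the
// image-pull window measured on the monotonic m=+... clock readings.
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999"
	created, _ := time.Parse(layout, "2026-01-27 22:06:47")
	running, _ := time.Parse(layout, "2026-01-27 22:07:02.641082636")

	// Monotonic readings (seconds) copied from the m=+... suffixes above.
	firstStartedPulling := 7434.964167588
	lastFinishedPulling := 7446.542880570

	e2e := running.Sub(created)
	pull := time.Duration((lastFinishedPulling - firstStartedPulling) * float64(time.Second))
	fmt.Println("podStartE2EDuration:", e2e)      // 15.641082636s
	fmt.Println("podStartSLOduration:", e2e-pull) // ≈ 4.062369654s
}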
Jan 27 22:07:05 crc kubenswrapper[4793]: I0127 22:07:05.814672 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd"
Jan 27 22:07:05 crc kubenswrapper[4793]: E0127 22:07:05.816023 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:07:06 crc kubenswrapper[4793]: I0127 22:07:06.803401 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"
Jan 27 22:07:06 crc kubenswrapper[4793]: E0127 22:07:06.804016 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:07:18 crc kubenswrapper[4793]: I0127 22:07:18.808186 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd"
Jan 27 22:07:18 crc kubenswrapper[4793]: E0127 22:07:18.811604 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:07:21 crc kubenswrapper[4793]: I0127 22:07:21.803526 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"
Jan 27 22:07:21 crc kubenswrapper[4793]: E0127 22:07:21.804331 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:07:32 crc kubenswrapper[4793]: I0127 22:07:32.804239 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"
Jan 27 22:07:32 crc kubenswrapper[4793]: E0127 22:07:32.805235 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:07:33 crc kubenswrapper[4793]: I0127 22:07:33.804236 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd"
Jan 27 22:07:33 crc kubenswrapper[4793]: E0127 22:07:33.804902 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:07:46 crc kubenswrapper[4793]: I0127 22:07:46.803264 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"
Jan 27 22:07:46 crc kubenswrapper[4793]: E0127 22:07:46.804505 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:07:47 crc kubenswrapper[4793]: I0127 22:07:47.804129 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd"
Jan 27 22:07:47 crc kubenswrapper[4793]: E0127 22:07:47.804893 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:07:59 crc kubenswrapper[4793]: I0127 22:07:59.804508 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"
Jan 27 22:07:59 crc kubenswrapper[4793]: E0127 22:07:59.805980 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:08:02 crc kubenswrapper[4793]: I0127 22:08:02.803477 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd"
Jan 27 22:08:02 crc kubenswrapper[4793]: E0127 22:08:02.804337 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:08:12 crc kubenswrapper[4793]: I0127 22:08:12.804064 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"
Jan 27 22:08:12 crc kubenswrapper[4793]: E0127 22:08:12.805861 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:08:14 crc kubenswrapper[4793]: I0127 22:08:14.804420 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd"
Jan 27 22:08:14 crc kubenswrapper[4793]: E0127 22:08:14.806074 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:08:23 crc kubenswrapper[4793]: I0127 22:08:23.804729 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"
Jan 27 22:08:23 crc kubenswrapper[4793]: E0127 22:08:23.806108 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:08:27 crc kubenswrapper[4793]: I0127 22:08:27.804147 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd"
Jan 27 22:08:27 crc kubenswrapper[4793]: E0127 22:08:27.805493 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:08:38 crc kubenswrapper[4793]: I0127 22:08:38.803988 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"
Jan 27 22:08:38 crc kubenswrapper[4793]: E0127 22:08:38.805062 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:08:40 crc kubenswrapper[4793]: I0127 22:08:40.805263 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd"
Jan 27 22:08:40 crc kubenswrapper[4793]: E0127 22:08:40.806464 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:08:52 crc kubenswrapper[4793]: I0127 22:08:52.803802 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"
Jan 27 22:08:52 crc kubenswrapper[4793]: I0127 22:08:52.804783 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd"
Jan 27 22:08:52 crc kubenswrapper[4793]: E0127 22:08:52.805008 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:08:52 crc kubenswrapper[4793]: E0127 22:08:52.805156 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:09:03 crc kubenswrapper[4793]: I0127 22:09:03.805948 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"
Jan 27 22:09:03 crc kubenswrapper[4793]: E0127 22:09:03.806890 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:09:03 crc kubenswrapper[4793]: I0127 22:09:03.807611 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd"
Jan 27 22:09:03 crc kubenswrapper[4793]: E0127 22:09:03.807913 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:09:14 crc kubenswrapper[4793]: I0127 22:09:14.803887 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd"
Jan 27 22:09:14 crc kubenswrapper[4793]: E0127 22:09:14.806875 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:09:17 crc kubenswrapper[4793]: I0127 22:09:17.803880 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"
Jan 27 22:09:17 crc kubenswrapper[4793]: E0127 22:09:17.804993 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:09:25 crc kubenswrapper[4793]: I0127 22:09:25.879956 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd"
Jan 27 22:09:25 crc kubenswrapper[4793]: E0127 22:09:25.880860 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:09:32 crc kubenswrapper[4793]: I0127 22:09:32.821522 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"
Jan 27 22:09:32 crc kubenswrapper[4793]: E0127 22:09:32.822959 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:09:36 crc kubenswrapper[4793]: I0127 22:09:36.804074 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd"
Jan 27 22:09:36 crc kubenswrapper[4793]: E0127 22:09:36.806800 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:09:44 crc kubenswrapper[4793]: I0127 22:09:44.805732 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8"
Jan 27 22:09:44 crc kubenswrapper[4793]: E0127 22:09:44.808893 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:09:50 crc kubenswrapper[4793]: I0127 22:09:50.803897 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd"
Jan 27 22:09:50 crc kubenswrapper[4793]: E0127 22:09:50.805055 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:10:04 crc kubenswrapper[4793]: I0127 22:10:04.804155 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd" Jan 27 22:10:04 crc kubenswrapper[4793]: E0127 22:10:04.805509 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:10:10 crc kubenswrapper[4793]: I0127 22:10:10.804110 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8" Jan 27 22:10:10 crc kubenswrapper[4793]: E0127 22:10:10.804962 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:10:19 crc kubenswrapper[4793]: I0127 22:10:19.803797 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd" Jan 27 22:10:19 crc kubenswrapper[4793]: E0127 22:10:19.804882 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:10:22 crc kubenswrapper[4793]: I0127 22:10:22.936102 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8" Jan 27 22:10:22 crc kubenswrapper[4793]: E0127 22:10:22.941205 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:10:30 crc kubenswrapper[4793]: I0127 22:10:30.803828 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd" Jan 27 22:10:31 crc kubenswrapper[4793]: I0127 22:10:31.886537 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f"} Jan 27 22:10:33 crc kubenswrapper[4793]: I0127 22:10:33.243211 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 22:10:33 crc kubenswrapper[4793]: I0127 22:10:33.915436 4793 generic.go:334] 
"Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" exitCode=1 Jan 27 22:10:33 crc kubenswrapper[4793]: I0127 22:10:33.915458 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f"} Jan 27 22:10:33 crc kubenswrapper[4793]: I0127 22:10:33.916059 4793 scope.go:117] "RemoveContainer" containerID="625f7b095b4cedc7ab0dbe7c9252f10f1eb9dbfec7da6a46472f9018c74362dd" Jan 27 22:10:33 crc kubenswrapper[4793]: I0127 22:10:33.916970 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:10:33 crc kubenswrapper[4793]: E0127 22:10:33.917580 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:10:36 crc kubenswrapper[4793]: I0127 22:10:36.803165 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8" Jan 27 22:10:36 crc kubenswrapper[4793]: E0127 22:10:36.804282 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:10:38 crc kubenswrapper[4793]: I0127 22:10:38.243367 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:10:38 crc kubenswrapper[4793]: I0127 22:10:38.245133 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:10:38 crc kubenswrapper[4793]: I0127 22:10:38.245383 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:10:38 crc kubenswrapper[4793]: I0127 22:10:38.246505 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:10:38 crc kubenswrapper[4793]: E0127 22:10:38.247144 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:10:38 crc kubenswrapper[4793]: I0127 22:10:38.989324 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:10:38 crc kubenswrapper[4793]: E0127 22:10:38.990519 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" 
pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:10:48 crc kubenswrapper[4793]: I0127 22:10:48.803967 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8" Jan 27 22:10:48 crc kubenswrapper[4793]: E0127 22:10:48.806688 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:10:53 crc kubenswrapper[4793]: I0127 22:10:53.804146 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:10:53 crc kubenswrapper[4793]: E0127 22:10:53.805508 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:11:03 crc kubenswrapper[4793]: I0127 22:11:03.804602 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8" Jan 27 22:11:03 crc kubenswrapper[4793]: E0127 22:11:03.806006 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:11:04 crc kubenswrapper[4793]: I0127 22:11:04.803073 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:11:04 crc kubenswrapper[4793]: E0127 22:11:04.803982 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:11:15 crc kubenswrapper[4793]: I0127 22:11:15.821459 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8" Jan 27 22:11:15 crc kubenswrapper[4793]: E0127 22:11:15.822798 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:11:16 crc kubenswrapper[4793]: I0127 22:11:16.804522 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:11:16 crc kubenswrapper[4793]: E0127 22:11:16.805164 4793 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:11:26 crc kubenswrapper[4793]: I0127 22:11:26.804212 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8" Jan 27 22:11:26 crc kubenswrapper[4793]: E0127 22:11:26.805512 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:11:31 crc kubenswrapper[4793]: I0127 22:11:31.804322 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:11:31 crc kubenswrapper[4793]: E0127 22:11:31.805608 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:11:37 crc kubenswrapper[4793]: I0127 22:11:37.804597 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8" Jan 27 22:11:37 crc kubenswrapper[4793]: E0127 22:11:37.806429 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:11:45 crc kubenswrapper[4793]: I0127 22:11:45.818643 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:11:45 crc kubenswrapper[4793]: E0127 22:11:45.819868 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:11:48 crc kubenswrapper[4793]: I0127 22:11:48.803974 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8" Jan 27 22:11:48 crc kubenswrapper[4793]: E0127 22:11:48.805062 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:11:58 crc 
kubenswrapper[4793]: I0127 22:11:58.938487 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:11:58 crc kubenswrapper[4793]: E0127 22:11:58.941465 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:12:00 crc kubenswrapper[4793]: I0127 22:12:00.803975 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8" Jan 27 22:12:01 crc kubenswrapper[4793]: I0127 22:12:01.180959 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"a722d8d749a9f7e0415ea5ba745c98ad7b557ce38a84931f7e836a29d1498437"} Jan 27 22:12:10 crc kubenswrapper[4793]: I0127 22:12:10.805073 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:12:10 crc kubenswrapper[4793]: E0127 22:12:10.807936 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:12:20 crc kubenswrapper[4793]: I0127 22:12:20.991906 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vv7tt"] Jan 27 22:12:20 crc kubenswrapper[4793]: E0127 22:12:20.993475 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74bb269f-0cd8-448c-b71a-577d487b81f5" containerName="extract-utilities" Jan 27 22:12:20 crc kubenswrapper[4793]: I0127 22:12:20.993501 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="74bb269f-0cd8-448c-b71a-577d487b81f5" containerName="extract-utilities" Jan 27 22:12:20 crc kubenswrapper[4793]: E0127 22:12:20.993588 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74bb269f-0cd8-448c-b71a-577d487b81f5" containerName="registry-server" Jan 27 22:12:20 crc kubenswrapper[4793]: I0127 22:12:20.993603 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="74bb269f-0cd8-448c-b71a-577d487b81f5" containerName="registry-server" Jan 27 22:12:20 crc kubenswrapper[4793]: E0127 22:12:20.993666 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74bb269f-0cd8-448c-b71a-577d487b81f5" containerName="extract-content" Jan 27 22:12:20 crc kubenswrapper[4793]: I0127 22:12:20.993688 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="74bb269f-0cd8-448c-b71a-577d487b81f5" containerName="extract-content" Jan 27 22:12:20 crc kubenswrapper[4793]: I0127 22:12:20.994322 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="74bb269f-0cd8-448c-b71a-577d487b81f5" containerName="registry-server" Jan 27 22:12:20 crc kubenswrapper[4793]: I0127 22:12:20.998765 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:21 crc kubenswrapper[4793]: I0127 22:12:21.015322 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vv7tt"] Jan 27 22:12:21 crc kubenswrapper[4793]: I0127 22:12:21.096775 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-utilities\") pod \"redhat-marketplace-vv7tt\" (UID: \"6a91e753-c983-47d3-a46e-3ad83d4b4a1e\") " pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:21 crc kubenswrapper[4793]: I0127 22:12:21.097249 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmp8m\" (UniqueName: \"kubernetes.io/projected/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-kube-api-access-dmp8m\") pod \"redhat-marketplace-vv7tt\" (UID: \"6a91e753-c983-47d3-a46e-3ad83d4b4a1e\") " pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:21 crc kubenswrapper[4793]: I0127 22:12:21.097603 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-catalog-content\") pod \"redhat-marketplace-vv7tt\" (UID: \"6a91e753-c983-47d3-a46e-3ad83d4b4a1e\") " pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:21 crc kubenswrapper[4793]: I0127 22:12:21.199881 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-utilities\") pod \"redhat-marketplace-vv7tt\" (UID: \"6a91e753-c983-47d3-a46e-3ad83d4b4a1e\") " pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:21 crc kubenswrapper[4793]: I0127 22:12:21.199972 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmp8m\" (UniqueName: \"kubernetes.io/projected/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-kube-api-access-dmp8m\") pod \"redhat-marketplace-vv7tt\" (UID: \"6a91e753-c983-47d3-a46e-3ad83d4b4a1e\") " pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:21 crc kubenswrapper[4793]: I0127 22:12:21.200043 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-catalog-content\") pod \"redhat-marketplace-vv7tt\" (UID: \"6a91e753-c983-47d3-a46e-3ad83d4b4a1e\") " pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:21 crc kubenswrapper[4793]: I0127 22:12:21.200902 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-catalog-content\") pod \"redhat-marketplace-vv7tt\" (UID: \"6a91e753-c983-47d3-a46e-3ad83d4b4a1e\") " pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:21 crc kubenswrapper[4793]: I0127 22:12:21.201225 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-utilities\") pod \"redhat-marketplace-vv7tt\" (UID: \"6a91e753-c983-47d3-a46e-3ad83d4b4a1e\") " pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:21 crc kubenswrapper[4793]: I0127 22:12:21.226372 4793 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-dmp8m\" (UniqueName: \"kubernetes.io/projected/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-kube-api-access-dmp8m\") pod \"redhat-marketplace-vv7tt\" (UID: \"6a91e753-c983-47d3-a46e-3ad83d4b4a1e\") " pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:21 crc kubenswrapper[4793]: I0127 22:12:21.357886 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:21 crc kubenswrapper[4793]: I0127 22:12:21.804667 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:12:21 crc kubenswrapper[4793]: E0127 22:12:21.805636 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:12:21 crc kubenswrapper[4793]: W0127 22:12:21.887231 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a91e753_c983_47d3_a46e_3ad83d4b4a1e.slice/crio-a873334a975b918215c12450d79b00ce0cf474af985bf1382a9404c4c0b34bd9 WatchSource:0}: Error finding container a873334a975b918215c12450d79b00ce0cf474af985bf1382a9404c4c0b34bd9: Status 404 returned error can't find the container with id a873334a975b918215c12450d79b00ce0cf474af985bf1382a9404c4c0b34bd9 Jan 27 22:12:21 crc kubenswrapper[4793]: I0127 22:12:21.900470 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vv7tt"] Jan 27 22:12:22 crc kubenswrapper[4793]: I0127 22:12:22.458924 4793 generic.go:334] "Generic (PLEG): container finished" podID="6a91e753-c983-47d3-a46e-3ad83d4b4a1e" containerID="c4246aa6b5bfacdd08bf2df3fdb741f2868770139e51055ec1a0a80f9721ca3d" exitCode=0 Jan 27 22:12:22 crc kubenswrapper[4793]: I0127 22:12:22.459172 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vv7tt" event={"ID":"6a91e753-c983-47d3-a46e-3ad83d4b4a1e","Type":"ContainerDied","Data":"c4246aa6b5bfacdd08bf2df3fdb741f2868770139e51055ec1a0a80f9721ca3d"} Jan 27 22:12:22 crc kubenswrapper[4793]: I0127 22:12:22.461037 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vv7tt" event={"ID":"6a91e753-c983-47d3-a46e-3ad83d4b4a1e","Type":"ContainerStarted","Data":"a873334a975b918215c12450d79b00ce0cf474af985bf1382a9404c4c0b34bd9"} Jan 27 22:12:22 crc kubenswrapper[4793]: I0127 22:12:22.463088 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 22:12:23 crc kubenswrapper[4793]: I0127 22:12:23.475365 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vv7tt" event={"ID":"6a91e753-c983-47d3-a46e-3ad83d4b4a1e","Type":"ContainerStarted","Data":"25c3021976d9fdc0c86963ea492a6a2f819dbe1d9c5338ceda4dcb2363c31254"} Jan 27 22:12:24 crc kubenswrapper[4793]: I0127 22:12:24.485053 4793 generic.go:334] "Generic (PLEG): container finished" podID="6a91e753-c983-47d3-a46e-3ad83d4b4a1e" containerID="25c3021976d9fdc0c86963ea492a6a2f819dbe1d9c5338ceda4dcb2363c31254" exitCode=0 Jan 27 22:12:24 crc kubenswrapper[4793]: I0127 22:12:24.485100 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-vv7tt" event={"ID":"6a91e753-c983-47d3-a46e-3ad83d4b4a1e","Type":"ContainerDied","Data":"25c3021976d9fdc0c86963ea492a6a2f819dbe1d9c5338ceda4dcb2363c31254"} Jan 27 22:12:25 crc kubenswrapper[4793]: I0127 22:12:25.501883 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vv7tt" event={"ID":"6a91e753-c983-47d3-a46e-3ad83d4b4a1e","Type":"ContainerStarted","Data":"fe9b8edf409836d9a1a6df91f2b6e5cc5fa6fce5250b0f30771f5cce6dab1eef"} Jan 27 22:12:25 crc kubenswrapper[4793]: I0127 22:12:25.546969 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vv7tt" podStartSLOduration=3.037083847 podStartE2EDuration="5.546936121s" podCreationTimestamp="2026-01-27 22:12:20 +0000 UTC" firstStartedPulling="2026-01-27 22:12:22.462584964 +0000 UTC m=+7767.852838150" lastFinishedPulling="2026-01-27 22:12:24.972437238 +0000 UTC m=+7770.362690424" observedRunningTime="2026-01-27 22:12:25.536367746 +0000 UTC m=+7770.926620912" watchObservedRunningTime="2026-01-27 22:12:25.546936121 +0000 UTC m=+7770.937189287" Jan 27 22:12:31 crc kubenswrapper[4793]: I0127 22:12:31.359278 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:31 crc kubenswrapper[4793]: I0127 22:12:31.360233 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:31 crc kubenswrapper[4793]: I0127 22:12:31.425582 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:31 crc kubenswrapper[4793]: I0127 22:12:31.638467 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:31 crc kubenswrapper[4793]: I0127 22:12:31.709897 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vv7tt"] Jan 27 22:12:32 crc kubenswrapper[4793]: I0127 22:12:32.805998 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:12:32 crc kubenswrapper[4793]: E0127 22:12:32.806281 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:12:33 crc kubenswrapper[4793]: I0127 22:12:33.606965 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vv7tt" podUID="6a91e753-c983-47d3-a46e-3ad83d4b4a1e" containerName="registry-server" containerID="cri-o://fe9b8edf409836d9a1a6df91f2b6e5cc5fa6fce5250b0f30771f5cce6dab1eef" gracePeriod=2 Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.206676 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.362404 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-utilities\") pod \"6a91e753-c983-47d3-a46e-3ad83d4b4a1e\" (UID: \"6a91e753-c983-47d3-a46e-3ad83d4b4a1e\") " Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.362619 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dmp8m\" (UniqueName: \"kubernetes.io/projected/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-kube-api-access-dmp8m\") pod \"6a91e753-c983-47d3-a46e-3ad83d4b4a1e\" (UID: \"6a91e753-c983-47d3-a46e-3ad83d4b4a1e\") " Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.362751 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-catalog-content\") pod \"6a91e753-c983-47d3-a46e-3ad83d4b4a1e\" (UID: \"6a91e753-c983-47d3-a46e-3ad83d4b4a1e\") " Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.363967 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-utilities" (OuterVolumeSpecName: "utilities") pod "6a91e753-c983-47d3-a46e-3ad83d4b4a1e" (UID: "6a91e753-c983-47d3-a46e-3ad83d4b4a1e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.376731 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-kube-api-access-dmp8m" (OuterVolumeSpecName: "kube-api-access-dmp8m") pod "6a91e753-c983-47d3-a46e-3ad83d4b4a1e" (UID: "6a91e753-c983-47d3-a46e-3ad83d4b4a1e"). InnerVolumeSpecName "kube-api-access-dmp8m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.385879 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6a91e753-c983-47d3-a46e-3ad83d4b4a1e" (UID: "6a91e753-c983-47d3-a46e-3ad83d4b4a1e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.465902 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dmp8m\" (UniqueName: \"kubernetes.io/projected/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-kube-api-access-dmp8m\") on node \"crc\" DevicePath \"\"" Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.465943 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.465956 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6a91e753-c983-47d3-a46e-3ad83d4b4a1e-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.617903 4793 generic.go:334] "Generic (PLEG): container finished" podID="6a91e753-c983-47d3-a46e-3ad83d4b4a1e" containerID="fe9b8edf409836d9a1a6df91f2b6e5cc5fa6fce5250b0f30771f5cce6dab1eef" exitCode=0 Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.617972 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vv7tt" Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.618210 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vv7tt" event={"ID":"6a91e753-c983-47d3-a46e-3ad83d4b4a1e","Type":"ContainerDied","Data":"fe9b8edf409836d9a1a6df91f2b6e5cc5fa6fce5250b0f30771f5cce6dab1eef"} Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.618363 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vv7tt" event={"ID":"6a91e753-c983-47d3-a46e-3ad83d4b4a1e","Type":"ContainerDied","Data":"a873334a975b918215c12450d79b00ce0cf474af985bf1382a9404c4c0b34bd9"} Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.618469 4793 scope.go:117] "RemoveContainer" containerID="fe9b8edf409836d9a1a6df91f2b6e5cc5fa6fce5250b0f30771f5cce6dab1eef" Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.638154 4793 scope.go:117] "RemoveContainer" containerID="25c3021976d9fdc0c86963ea492a6a2f819dbe1d9c5338ceda4dcb2363c31254" Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.670351 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vv7tt"] Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.680986 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vv7tt"] Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.681853 4793 scope.go:117] "RemoveContainer" containerID="c4246aa6b5bfacdd08bf2df3fdb741f2868770139e51055ec1a0a80f9721ca3d" Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.724437 4793 scope.go:117] "RemoveContainer" containerID="fe9b8edf409836d9a1a6df91f2b6e5cc5fa6fce5250b0f30771f5cce6dab1eef" Jan 27 22:12:34 crc kubenswrapper[4793]: E0127 22:12:34.725752 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe9b8edf409836d9a1a6df91f2b6e5cc5fa6fce5250b0f30771f5cce6dab1eef\": container with ID starting with fe9b8edf409836d9a1a6df91f2b6e5cc5fa6fce5250b0f30771f5cce6dab1eef not found: ID does not exist" containerID="fe9b8edf409836d9a1a6df91f2b6e5cc5fa6fce5250b0f30771f5cce6dab1eef" Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.725894 4793 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe9b8edf409836d9a1a6df91f2b6e5cc5fa6fce5250b0f30771f5cce6dab1eef"} err="failed to get container status \"fe9b8edf409836d9a1a6df91f2b6e5cc5fa6fce5250b0f30771f5cce6dab1eef\": rpc error: code = NotFound desc = could not find container \"fe9b8edf409836d9a1a6df91f2b6e5cc5fa6fce5250b0f30771f5cce6dab1eef\": container with ID starting with fe9b8edf409836d9a1a6df91f2b6e5cc5fa6fce5250b0f30771f5cce6dab1eef not found: ID does not exist" Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.725998 4793 scope.go:117] "RemoveContainer" containerID="25c3021976d9fdc0c86963ea492a6a2f819dbe1d9c5338ceda4dcb2363c31254" Jan 27 22:12:34 crc kubenswrapper[4793]: E0127 22:12:34.726866 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25c3021976d9fdc0c86963ea492a6a2f819dbe1d9c5338ceda4dcb2363c31254\": container with ID starting with 25c3021976d9fdc0c86963ea492a6a2f819dbe1d9c5338ceda4dcb2363c31254 not found: ID does not exist" containerID="25c3021976d9fdc0c86963ea492a6a2f819dbe1d9c5338ceda4dcb2363c31254" Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.726961 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25c3021976d9fdc0c86963ea492a6a2f819dbe1d9c5338ceda4dcb2363c31254"} err="failed to get container status \"25c3021976d9fdc0c86963ea492a6a2f819dbe1d9c5338ceda4dcb2363c31254\": rpc error: code = NotFound desc = could not find container \"25c3021976d9fdc0c86963ea492a6a2f819dbe1d9c5338ceda4dcb2363c31254\": container with ID starting with 25c3021976d9fdc0c86963ea492a6a2f819dbe1d9c5338ceda4dcb2363c31254 not found: ID does not exist" Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.727055 4793 scope.go:117] "RemoveContainer" containerID="c4246aa6b5bfacdd08bf2df3fdb741f2868770139e51055ec1a0a80f9721ca3d" Jan 27 22:12:34 crc kubenswrapper[4793]: E0127 22:12:34.727641 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4246aa6b5bfacdd08bf2df3fdb741f2868770139e51055ec1a0a80f9721ca3d\": container with ID starting with c4246aa6b5bfacdd08bf2df3fdb741f2868770139e51055ec1a0a80f9721ca3d not found: ID does not exist" containerID="c4246aa6b5bfacdd08bf2df3fdb741f2868770139e51055ec1a0a80f9721ca3d" Jan 27 22:12:34 crc kubenswrapper[4793]: I0127 22:12:34.727733 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4246aa6b5bfacdd08bf2df3fdb741f2868770139e51055ec1a0a80f9721ca3d"} err="failed to get container status \"c4246aa6b5bfacdd08bf2df3fdb741f2868770139e51055ec1a0a80f9721ca3d\": rpc error: code = NotFound desc = could not find container \"c4246aa6b5bfacdd08bf2df3fdb741f2868770139e51055ec1a0a80f9721ca3d\": container with ID starting with c4246aa6b5bfacdd08bf2df3fdb741f2868770139e51055ec1a0a80f9721ca3d not found: ID does not exist" Jan 27 22:12:35 crc kubenswrapper[4793]: I0127 22:12:35.828435 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a91e753-c983-47d3-a46e-3ad83d4b4a1e" path="/var/lib/kubelet/pods/6a91e753-c983-47d3-a46e-3ad83d4b4a1e/volumes" Jan 27 22:12:46 crc kubenswrapper[4793]: I0127 22:12:46.804646 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:12:46 crc kubenswrapper[4793]: E0127 22:12:46.806311 4793 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:13:00 crc kubenswrapper[4793]: I0127 22:13:00.804596 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:13:00 crc kubenswrapper[4793]: E0127 22:13:00.806444 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:13:13 crc kubenswrapper[4793]: I0127 22:13:13.803385 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:13:13 crc kubenswrapper[4793]: E0127 22:13:13.804269 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:13:25 crc kubenswrapper[4793]: I0127 22:13:24.803893 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:13:25 crc kubenswrapper[4793]: E0127 22:13:24.805167 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:13:39 crc kubenswrapper[4793]: I0127 22:13:39.803861 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:13:39 crc kubenswrapper[4793]: E0127 22:13:39.804921 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:13:51 crc kubenswrapper[4793]: I0127 22:13:51.804797 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:13:51 crc kubenswrapper[4793]: E0127 22:13:51.805901 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:14:04 crc kubenswrapper[4793]: I0127 22:14:04.804435 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:14:04 crc kubenswrapper[4793]: E0127 22:14:04.805255 4793 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:14:16 crc kubenswrapper[4793]: I0127 22:14:16.804295 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:14:16 crc kubenswrapper[4793]: E0127 22:14:16.805789 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:14:22 crc kubenswrapper[4793]: I0127 22:14:22.754314 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:14:22 crc kubenswrapper[4793]: I0127 22:14:22.755198 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:14:30 crc kubenswrapper[4793]: I0127 22:14:30.803930 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:14:30 crc kubenswrapper[4793]: E0127 22:14:30.805101 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:14:43 crc kubenswrapper[4793]: I0127 22:14:43.804117 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:14:43 crc kubenswrapper[4793]: E0127 22:14:43.805642 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:14:52 crc kubenswrapper[4793]: I0127 22:14:52.753929 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:14:52 crc kubenswrapper[4793]: I0127 22:14:52.754785 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection 
refused" Jan 27 22:14:55 crc kubenswrapper[4793]: I0127 22:14:55.821994 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:14:55 crc kubenswrapper[4793]: E0127 22:14:55.822972 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.202732 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78"] Jan 27 22:15:00 crc kubenswrapper[4793]: E0127 22:15:00.203874 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a91e753-c983-47d3-a46e-3ad83d4b4a1e" containerName="extract-content" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.203891 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a91e753-c983-47d3-a46e-3ad83d4b4a1e" containerName="extract-content" Jan 27 22:15:00 crc kubenswrapper[4793]: E0127 22:15:00.203911 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a91e753-c983-47d3-a46e-3ad83d4b4a1e" containerName="registry-server" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.203918 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a91e753-c983-47d3-a46e-3ad83d4b4a1e" containerName="registry-server" Jan 27 22:15:00 crc kubenswrapper[4793]: E0127 22:15:00.203936 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a91e753-c983-47d3-a46e-3ad83d4b4a1e" containerName="extract-utilities" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.203945 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a91e753-c983-47d3-a46e-3ad83d4b4a1e" containerName="extract-utilities" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.204862 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a91e753-c983-47d3-a46e-3ad83d4b4a1e" containerName="registry-server" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.205857 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.214924 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.215033 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.223856 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78"] Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.373425 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-secret-volume\") pod \"collect-profiles-29492535-r6m78\" (UID: \"2b9eeb44-3b19-4c99-8101-9320f01c0eb9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.373641 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t42s4\" (UniqueName: \"kubernetes.io/projected/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-kube-api-access-t42s4\") pod \"collect-profiles-29492535-r6m78\" (UID: \"2b9eeb44-3b19-4c99-8101-9320f01c0eb9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.373713 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-config-volume\") pod \"collect-profiles-29492535-r6m78\" (UID: \"2b9eeb44-3b19-4c99-8101-9320f01c0eb9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.475895 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-config-volume\") pod \"collect-profiles-29492535-r6m78\" (UID: \"2b9eeb44-3b19-4c99-8101-9320f01c0eb9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.476118 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-secret-volume\") pod \"collect-profiles-29492535-r6m78\" (UID: \"2b9eeb44-3b19-4c99-8101-9320f01c0eb9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.476300 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t42s4\" (UniqueName: \"kubernetes.io/projected/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-kube-api-access-t42s4\") pod \"collect-profiles-29492535-r6m78\" (UID: \"2b9eeb44-3b19-4c99-8101-9320f01c0eb9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.477874 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-config-volume\") pod 
\"collect-profiles-29492535-r6m78\" (UID: \"2b9eeb44-3b19-4c99-8101-9320f01c0eb9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.485163 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-secret-volume\") pod \"collect-profiles-29492535-r6m78\" (UID: \"2b9eeb44-3b19-4c99-8101-9320f01c0eb9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.521306 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t42s4\" (UniqueName: \"kubernetes.io/projected/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-kube-api-access-t42s4\") pod \"collect-profiles-29492535-r6m78\" (UID: \"2b9eeb44-3b19-4c99-8101-9320f01c0eb9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78" Jan 27 22:15:00 crc kubenswrapper[4793]: I0127 22:15:00.554845 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78" Jan 27 22:15:01 crc kubenswrapper[4793]: I0127 22:15:01.016020 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78"] Jan 27 22:15:01 crc kubenswrapper[4793]: W0127 22:15:01.023756 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2b9eeb44_3b19_4c99_8101_9320f01c0eb9.slice/crio-86e6dc612bcaba5aae6316f1e19574564be562d5b95b3ef87a4098abea27d6a8 WatchSource:0}: Error finding container 86e6dc612bcaba5aae6316f1e19574564be562d5b95b3ef87a4098abea27d6a8: Status 404 returned error can't find the container with id 86e6dc612bcaba5aae6316f1e19574564be562d5b95b3ef87a4098abea27d6a8 Jan 27 22:15:02 crc kubenswrapper[4793]: I0127 22:15:02.012616 4793 generic.go:334] "Generic (PLEG): container finished" podID="2b9eeb44-3b19-4c99-8101-9320f01c0eb9" containerID="9a035f21fc8db686def841e5283645ad7a345117764617d7770823cd3ad7c974" exitCode=0 Jan 27 22:15:02 crc kubenswrapper[4793]: I0127 22:15:02.012692 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78" event={"ID":"2b9eeb44-3b19-4c99-8101-9320f01c0eb9","Type":"ContainerDied","Data":"9a035f21fc8db686def841e5283645ad7a345117764617d7770823cd3ad7c974"} Jan 27 22:15:02 crc kubenswrapper[4793]: I0127 22:15:02.013814 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78" event={"ID":"2b9eeb44-3b19-4c99-8101-9320f01c0eb9","Type":"ContainerStarted","Data":"86e6dc612bcaba5aae6316f1e19574564be562d5b95b3ef87a4098abea27d6a8"} Jan 27 22:15:03 crc kubenswrapper[4793]: I0127 22:15:03.456299 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78" Jan 27 22:15:03 crc kubenswrapper[4793]: I0127 22:15:03.535665 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t42s4\" (UniqueName: \"kubernetes.io/projected/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-kube-api-access-t42s4\") pod \"2b9eeb44-3b19-4c99-8101-9320f01c0eb9\" (UID: \"2b9eeb44-3b19-4c99-8101-9320f01c0eb9\") " Jan 27 22:15:03 crc kubenswrapper[4793]: I0127 22:15:03.535853 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-config-volume\") pod \"2b9eeb44-3b19-4c99-8101-9320f01c0eb9\" (UID: \"2b9eeb44-3b19-4c99-8101-9320f01c0eb9\") " Jan 27 22:15:03 crc kubenswrapper[4793]: I0127 22:15:03.536400 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-secret-volume\") pod \"2b9eeb44-3b19-4c99-8101-9320f01c0eb9\" (UID: \"2b9eeb44-3b19-4c99-8101-9320f01c0eb9\") " Jan 27 22:15:03 crc kubenswrapper[4793]: I0127 22:15:03.536887 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-config-volume" (OuterVolumeSpecName: "config-volume") pod "2b9eeb44-3b19-4c99-8101-9320f01c0eb9" (UID: "2b9eeb44-3b19-4c99-8101-9320f01c0eb9"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 22:15:03 crc kubenswrapper[4793]: I0127 22:15:03.539209 4793 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 22:15:03 crc kubenswrapper[4793]: I0127 22:15:03.548037 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2b9eeb44-3b19-4c99-8101-9320f01c0eb9" (UID: "2b9eeb44-3b19-4c99-8101-9320f01c0eb9"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 22:15:03 crc kubenswrapper[4793]: I0127 22:15:03.550722 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-kube-api-access-t42s4" (OuterVolumeSpecName: "kube-api-access-t42s4") pod "2b9eeb44-3b19-4c99-8101-9320f01c0eb9" (UID: "2b9eeb44-3b19-4c99-8101-9320f01c0eb9"). InnerVolumeSpecName "kube-api-access-t42s4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:15:03 crc kubenswrapper[4793]: I0127 22:15:03.640913 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t42s4\" (UniqueName: \"kubernetes.io/projected/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-kube-api-access-t42s4\") on node \"crc\" DevicePath \"\"" Jan 27 22:15:03 crc kubenswrapper[4793]: I0127 22:15:03.640944 4793 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b9eeb44-3b19-4c99-8101-9320f01c0eb9-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 22:15:04 crc kubenswrapper[4793]: I0127 22:15:04.042255 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78" event={"ID":"2b9eeb44-3b19-4c99-8101-9320f01c0eb9","Type":"ContainerDied","Data":"86e6dc612bcaba5aae6316f1e19574564be562d5b95b3ef87a4098abea27d6a8"} Jan 27 22:15:04 crc kubenswrapper[4793]: I0127 22:15:04.042302 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="86e6dc612bcaba5aae6316f1e19574564be562d5b95b3ef87a4098abea27d6a8" Jan 27 22:15:04 crc kubenswrapper[4793]: I0127 22:15:04.042375 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492535-r6m78" Jan 27 22:15:04 crc kubenswrapper[4793]: I0127 22:15:04.550135 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"] Jan 27 22:15:04 crc kubenswrapper[4793]: I0127 22:15:04.563119 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492490-r2gnv"] Jan 27 22:15:05 crc kubenswrapper[4793]: I0127 22:15:05.976319 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8347701-8062-418c-9ff1-9b7a05a3509e" path="/var/lib/kubelet/pods/e8347701-8062-418c-9ff1-9b7a05a3509e/volumes" Jan 27 22:15:08 crc kubenswrapper[4793]: I0127 22:15:08.804614 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:15:08 crc kubenswrapper[4793]: E0127 22:15:08.807530 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:15:22 crc kubenswrapper[4793]: I0127 22:15:22.753594 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:15:22 crc kubenswrapper[4793]: I0127 22:15:22.754618 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:15:22 crc kubenswrapper[4793]: I0127 22:15:22.754701 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 22:15:22 crc kubenswrapper[4793]: I0127 22:15:22.755789 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a722d8d749a9f7e0415ea5ba745c98ad7b557ce38a84931f7e836a29d1498437"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 22:15:22 crc kubenswrapper[4793]: I0127 22:15:22.755875 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://a722d8d749a9f7e0415ea5ba745c98ad7b557ce38a84931f7e836a29d1498437" gracePeriod=600 Jan 27 22:15:22 crc kubenswrapper[4793]: I0127 22:15:22.804725 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:15:22 crc kubenswrapper[4793]: E0127 22:15:22.805058 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:15:23 crc kubenswrapper[4793]: I0127 22:15:23.266363 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="a722d8d749a9f7e0415ea5ba745c98ad7b557ce38a84931f7e836a29d1498437" exitCode=0 Jan 27 22:15:23 crc kubenswrapper[4793]: I0127 22:15:23.266422 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"a722d8d749a9f7e0415ea5ba745c98ad7b557ce38a84931f7e836a29d1498437"} Jan 27 22:15:23 crc kubenswrapper[4793]: I0127 22:15:23.266869 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120"} Jan 27 22:15:23 crc kubenswrapper[4793]: I0127 22:15:23.266900 4793 scope.go:117] "RemoveContainer" containerID="32443115b6ff5b4bdfca2a12f2b983016aec8ad314965b03426cde90cf0138d8" Jan 27 22:15:37 crc kubenswrapper[4793]: I0127 22:15:37.803688 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:15:38 crc kubenswrapper[4793]: I0127 22:15:38.673797 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"} Jan 27 22:15:38 crc kubenswrapper[4793]: I0127 22:15:38.989970 4793 scope.go:117] "RemoveContainer" containerID="1736be7a798a3618bf76683fb4f57755fc764015e2846571c7e9f6aba88410e1" Jan 27 22:15:40 crc kubenswrapper[4793]: I0127 22:15:40.701774 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625" exitCode=1 Jan 27 22:15:40 crc kubenswrapper[4793]: 
I0127 22:15:40.701841 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"} Jan 27 22:15:40 crc kubenswrapper[4793]: I0127 22:15:40.703287 4793 scope.go:117] "RemoveContainer" containerID="37d9cb9281b89739f3ad9db9ed1e960823640cdbcf0c44bc9ba74b1d3b1a7d5f" Jan 27 22:15:40 crc kubenswrapper[4793]: I0127 22:15:40.703862 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625" Jan 27 22:15:40 crc kubenswrapper[4793]: E0127 22:15:40.704302 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:15:43 crc kubenswrapper[4793]: I0127 22:15:43.243039 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 22:15:43 crc kubenswrapper[4793]: I0127 22:15:43.244923 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625" Jan 27 22:15:43 crc kubenswrapper[4793]: E0127 22:15:43.245293 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:15:48 crc kubenswrapper[4793]: I0127 22:15:48.242473 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:15:48 crc kubenswrapper[4793]: I0127 22:15:48.243401 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:15:48 crc kubenswrapper[4793]: I0127 22:15:48.243415 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:15:48 crc kubenswrapper[4793]: I0127 22:15:48.244772 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625" Jan 27 22:15:48 crc kubenswrapper[4793]: E0127 22:15:48.245246 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:16:00 crc kubenswrapper[4793]: I0127 22:16:00.816181 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625" Jan 27 22:16:00 crc kubenswrapper[4793]: E0127 22:16:00.817269 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:16:05 crc kubenswrapper[4793]: I0127 
22:16:05.609019 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-b2j7l"] Jan 27 22:16:05 crc kubenswrapper[4793]: E0127 22:16:05.610768 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b9eeb44-3b19-4c99-8101-9320f01c0eb9" containerName="collect-profiles" Jan 27 22:16:05 crc kubenswrapper[4793]: I0127 22:16:05.610798 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b9eeb44-3b19-4c99-8101-9320f01c0eb9" containerName="collect-profiles" Jan 27 22:16:05 crc kubenswrapper[4793]: I0127 22:16:05.611143 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b9eeb44-3b19-4c99-8101-9320f01c0eb9" containerName="collect-profiles" Jan 27 22:16:05 crc kubenswrapper[4793]: I0127 22:16:05.614341 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b2j7l" Jan 27 22:16:05 crc kubenswrapper[4793]: I0127 22:16:05.846425 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9518947-1a40-464c-872a-b8f1d5d56226-catalog-content\") pod \"community-operators-b2j7l\" (UID: \"e9518947-1a40-464c-872a-b8f1d5d56226\") " pod="openshift-marketplace/community-operators-b2j7l" Jan 27 22:16:05 crc kubenswrapper[4793]: I0127 22:16:05.846702 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkj6x\" (UniqueName: \"kubernetes.io/projected/e9518947-1a40-464c-872a-b8f1d5d56226-kube-api-access-mkj6x\") pod \"community-operators-b2j7l\" (UID: \"e9518947-1a40-464c-872a-b8f1d5d56226\") " pod="openshift-marketplace/community-operators-b2j7l" Jan 27 22:16:05 crc kubenswrapper[4793]: I0127 22:16:05.846757 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9518947-1a40-464c-872a-b8f1d5d56226-utilities\") pod \"community-operators-b2j7l\" (UID: \"e9518947-1a40-464c-872a-b8f1d5d56226\") " pod="openshift-marketplace/community-operators-b2j7l" Jan 27 22:16:05 crc kubenswrapper[4793]: I0127 22:16:05.861284 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b2j7l"] Jan 27 22:16:05 crc kubenswrapper[4793]: I0127 22:16:05.948637 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9518947-1a40-464c-872a-b8f1d5d56226-catalog-content\") pod \"community-operators-b2j7l\" (UID: \"e9518947-1a40-464c-872a-b8f1d5d56226\") " pod="openshift-marketplace/community-operators-b2j7l" Jan 27 22:16:05 crc kubenswrapper[4793]: I0127 22:16:05.949203 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9518947-1a40-464c-872a-b8f1d5d56226-catalog-content\") pod \"community-operators-b2j7l\" (UID: \"e9518947-1a40-464c-872a-b8f1d5d56226\") " pod="openshift-marketplace/community-operators-b2j7l" Jan 27 22:16:05 crc kubenswrapper[4793]: I0127 22:16:05.949565 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkj6x\" (UniqueName: \"kubernetes.io/projected/e9518947-1a40-464c-872a-b8f1d5d56226-kube-api-access-mkj6x\") pod \"community-operators-b2j7l\" (UID: \"e9518947-1a40-464c-872a-b8f1d5d56226\") " pod="openshift-marketplace/community-operators-b2j7l" Jan 27 22:16:05 crc 
kubenswrapper[4793]: I0127 22:16:05.949668 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9518947-1a40-464c-872a-b8f1d5d56226-utilities\") pod \"community-operators-b2j7l\" (UID: \"e9518947-1a40-464c-872a-b8f1d5d56226\") " pod="openshift-marketplace/community-operators-b2j7l" Jan 27 22:16:05 crc kubenswrapper[4793]: I0127 22:16:05.950636 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9518947-1a40-464c-872a-b8f1d5d56226-utilities\") pod \"community-operators-b2j7l\" (UID: \"e9518947-1a40-464c-872a-b8f1d5d56226\") " pod="openshift-marketplace/community-operators-b2j7l" Jan 27 22:16:05 crc kubenswrapper[4793]: I0127 22:16:05.972758 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkj6x\" (UniqueName: \"kubernetes.io/projected/e9518947-1a40-464c-872a-b8f1d5d56226-kube-api-access-mkj6x\") pod \"community-operators-b2j7l\" (UID: \"e9518947-1a40-464c-872a-b8f1d5d56226\") " pod="openshift-marketplace/community-operators-b2j7l" Jan 27 22:16:06 crc kubenswrapper[4793]: I0127 22:16:06.149301 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b2j7l" Jan 27 22:16:06 crc kubenswrapper[4793]: I0127 22:16:06.743815 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b2j7l"] Jan 27 22:16:06 crc kubenswrapper[4793]: W0127 22:16:06.751579 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode9518947_1a40_464c_872a_b8f1d5d56226.slice/crio-e77c9a644c88dec6ad6de13fbfbdd4355e201a834e5e87ef97af0a8b4d4d4dce WatchSource:0}: Error finding container e77c9a644c88dec6ad6de13fbfbdd4355e201a834e5e87ef97af0a8b4d4d4dce: Status 404 returned error can't find the container with id e77c9a644c88dec6ad6de13fbfbdd4355e201a834e5e87ef97af0a8b4d4d4dce Jan 27 22:16:07 crc kubenswrapper[4793]: I0127 22:16:07.221452 4793 generic.go:334] "Generic (PLEG): container finished" podID="e9518947-1a40-464c-872a-b8f1d5d56226" containerID="8479fdd85b62a5ecf971c2f1e0947adeb8a16c14176293478caa9642a292b9ac" exitCode=0 Jan 27 22:16:07 crc kubenswrapper[4793]: I0127 22:16:07.221592 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b2j7l" event={"ID":"e9518947-1a40-464c-872a-b8f1d5d56226","Type":"ContainerDied","Data":"8479fdd85b62a5ecf971c2f1e0947adeb8a16c14176293478caa9642a292b9ac"} Jan 27 22:16:07 crc kubenswrapper[4793]: I0127 22:16:07.221859 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b2j7l" event={"ID":"e9518947-1a40-464c-872a-b8f1d5d56226","Type":"ContainerStarted","Data":"e77c9a644c88dec6ad6de13fbfbdd4355e201a834e5e87ef97af0a8b4d4d4dce"} Jan 27 22:16:08 crc kubenswrapper[4793]: I0127 22:16:08.235974 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b2j7l" event={"ID":"e9518947-1a40-464c-872a-b8f1d5d56226","Type":"ContainerStarted","Data":"6aab08776e3aba4b9943d42fd95dad1f55224bfd365ef2ff21e43c7be123228c"} Jan 27 22:16:10 crc kubenswrapper[4793]: I0127 22:16:10.265010 4793 generic.go:334] "Generic (PLEG): container finished" podID="e9518947-1a40-464c-872a-b8f1d5d56226" containerID="6aab08776e3aba4b9943d42fd95dad1f55224bfd365ef2ff21e43c7be123228c" exitCode=0 Jan 27 22:16:10 crc 
kubenswrapper[4793]: I0127 22:16:10.265094 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b2j7l" event={"ID":"e9518947-1a40-464c-872a-b8f1d5d56226","Type":"ContainerDied","Data":"6aab08776e3aba4b9943d42fd95dad1f55224bfd365ef2ff21e43c7be123228c"} Jan 27 22:16:12 crc kubenswrapper[4793]: I0127 22:16:12.300254 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b2j7l" event={"ID":"e9518947-1a40-464c-872a-b8f1d5d56226","Type":"ContainerStarted","Data":"be8cda9d3de80514ad5c684a56301b1822c0fd8c5317b25326a456e9bab94674"} Jan 27 22:16:12 crc kubenswrapper[4793]: I0127 22:16:12.472754 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-b2j7l" podStartSLOduration=3.625735239 podStartE2EDuration="7.47272636s" podCreationTimestamp="2026-01-27 22:16:05 +0000 UTC" firstStartedPulling="2026-01-27 22:16:07.226110738 +0000 UTC m=+7992.616363904" lastFinishedPulling="2026-01-27 22:16:11.073101829 +0000 UTC m=+7996.463355025" observedRunningTime="2026-01-27 22:16:12.463816345 +0000 UTC m=+7997.854069501" watchObservedRunningTime="2026-01-27 22:16:12.47272636 +0000 UTC m=+7997.862979516" Jan 27 22:16:13 crc kubenswrapper[4793]: I0127 22:16:13.805170 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625" Jan 27 22:16:13 crc kubenswrapper[4793]: E0127 22:16:13.806123 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:16:16 crc kubenswrapper[4793]: I0127 22:16:16.150958 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-b2j7l" Jan 27 22:16:16 crc kubenswrapper[4793]: I0127 22:16:16.152666 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-b2j7l" Jan 27 22:16:16 crc kubenswrapper[4793]: I0127 22:16:16.245913 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-b2j7l" Jan 27 22:16:16 crc kubenswrapper[4793]: I0127 22:16:16.625046 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-b2j7l" Jan 27 22:16:16 crc kubenswrapper[4793]: I0127 22:16:16.672839 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b2j7l"] Jan 27 22:16:18 crc kubenswrapper[4793]: I0127 22:16:18.607512 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-b2j7l" podUID="e9518947-1a40-464c-872a-b8f1d5d56226" containerName="registry-server" containerID="cri-o://be8cda9d3de80514ad5c684a56301b1822c0fd8c5317b25326a456e9bab94674" gracePeriod=2 Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.281279 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b2j7l"
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.332081 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkj6x\" (UniqueName: \"kubernetes.io/projected/e9518947-1a40-464c-872a-b8f1d5d56226-kube-api-access-mkj6x\") pod \"e9518947-1a40-464c-872a-b8f1d5d56226\" (UID: \"e9518947-1a40-464c-872a-b8f1d5d56226\") "
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.333083 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9518947-1a40-464c-872a-b8f1d5d56226-catalog-content\") pod \"e9518947-1a40-464c-872a-b8f1d5d56226\" (UID: \"e9518947-1a40-464c-872a-b8f1d5d56226\") "
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.333222 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9518947-1a40-464c-872a-b8f1d5d56226-utilities\") pod \"e9518947-1a40-464c-872a-b8f1d5d56226\" (UID: \"e9518947-1a40-464c-872a-b8f1d5d56226\") "
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.334850 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9518947-1a40-464c-872a-b8f1d5d56226-utilities" (OuterVolumeSpecName: "utilities") pod "e9518947-1a40-464c-872a-b8f1d5d56226" (UID: "e9518947-1a40-464c-872a-b8f1d5d56226"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.349829 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9518947-1a40-464c-872a-b8f1d5d56226-kube-api-access-mkj6x" (OuterVolumeSpecName: "kube-api-access-mkj6x") pod "e9518947-1a40-464c-872a-b8f1d5d56226" (UID: "e9518947-1a40-464c-872a-b8f1d5d56226"). InnerVolumeSpecName "kube-api-access-mkj6x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.383252 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9518947-1a40-464c-872a-b8f1d5d56226-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e9518947-1a40-464c-872a-b8f1d5d56226" (UID: "e9518947-1a40-464c-872a-b8f1d5d56226"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.437014 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9518947-1a40-464c-872a-b8f1d5d56226-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.437068 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkj6x\" (UniqueName: \"kubernetes.io/projected/e9518947-1a40-464c-872a-b8f1d5d56226-kube-api-access-mkj6x\") on node \"crc\" DevicePath \"\""
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.437080 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9518947-1a40-464c-872a-b8f1d5d56226-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.619242 4793 generic.go:334] "Generic (PLEG): container finished" podID="e9518947-1a40-464c-872a-b8f1d5d56226" containerID="be8cda9d3de80514ad5c684a56301b1822c0fd8c5317b25326a456e9bab94674" exitCode=0
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.619329 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b2j7l" event={"ID":"e9518947-1a40-464c-872a-b8f1d5d56226","Type":"ContainerDied","Data":"be8cda9d3de80514ad5c684a56301b1822c0fd8c5317b25326a456e9bab94674"}
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.619307 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b2j7l"
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.619375 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b2j7l" event={"ID":"e9518947-1a40-464c-872a-b8f1d5d56226","Type":"ContainerDied","Data":"e77c9a644c88dec6ad6de13fbfbdd4355e201a834e5e87ef97af0a8b4d4d4dce"}
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.619393 4793 scope.go:117] "RemoveContainer" containerID="be8cda9d3de80514ad5c684a56301b1822c0fd8c5317b25326a456e9bab94674"
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.662217 4793 scope.go:117] "RemoveContainer" containerID="6aab08776e3aba4b9943d42fd95dad1f55224bfd365ef2ff21e43c7be123228c"
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.688205 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b2j7l"]
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.706126 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-b2j7l"]
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.708243 4793 scope.go:117] "RemoveContainer" containerID="8479fdd85b62a5ecf971c2f1e0947adeb8a16c14176293478caa9642a292b9ac"
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.777228 4793 scope.go:117] "RemoveContainer" containerID="be8cda9d3de80514ad5c684a56301b1822c0fd8c5317b25326a456e9bab94674"
Jan 27 22:16:19 crc kubenswrapper[4793]: E0127 22:16:19.778238 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be8cda9d3de80514ad5c684a56301b1822c0fd8c5317b25326a456e9bab94674\": container with ID starting with be8cda9d3de80514ad5c684a56301b1822c0fd8c5317b25326a456e9bab94674 not found: ID does not exist" containerID="be8cda9d3de80514ad5c684a56301b1822c0fd8c5317b25326a456e9bab94674"
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.778284 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be8cda9d3de80514ad5c684a56301b1822c0fd8c5317b25326a456e9bab94674"} err="failed to get container status \"be8cda9d3de80514ad5c684a56301b1822c0fd8c5317b25326a456e9bab94674\": rpc error: code = NotFound desc = could not find container \"be8cda9d3de80514ad5c684a56301b1822c0fd8c5317b25326a456e9bab94674\": container with ID starting with be8cda9d3de80514ad5c684a56301b1822c0fd8c5317b25326a456e9bab94674 not found: ID does not exist"
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.778323 4793 scope.go:117] "RemoveContainer" containerID="6aab08776e3aba4b9943d42fd95dad1f55224bfd365ef2ff21e43c7be123228c"
Jan 27 22:16:19 crc kubenswrapper[4793]: E0127 22:16:19.778740 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6aab08776e3aba4b9943d42fd95dad1f55224bfd365ef2ff21e43c7be123228c\": container with ID starting with 6aab08776e3aba4b9943d42fd95dad1f55224bfd365ef2ff21e43c7be123228c not found: ID does not exist" containerID="6aab08776e3aba4b9943d42fd95dad1f55224bfd365ef2ff21e43c7be123228c"
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.778793 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6aab08776e3aba4b9943d42fd95dad1f55224bfd365ef2ff21e43c7be123228c"} err="failed to get container status \"6aab08776e3aba4b9943d42fd95dad1f55224bfd365ef2ff21e43c7be123228c\": rpc error: code = NotFound desc = could not find container \"6aab08776e3aba4b9943d42fd95dad1f55224bfd365ef2ff21e43c7be123228c\": container with ID starting with 6aab08776e3aba4b9943d42fd95dad1f55224bfd365ef2ff21e43c7be123228c not found: ID does not exist"
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.778854 4793 scope.go:117] "RemoveContainer" containerID="8479fdd85b62a5ecf971c2f1e0947adeb8a16c14176293478caa9642a292b9ac"
Jan 27 22:16:19 crc kubenswrapper[4793]: E0127 22:16:19.779113 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8479fdd85b62a5ecf971c2f1e0947adeb8a16c14176293478caa9642a292b9ac\": container with ID starting with 8479fdd85b62a5ecf971c2f1e0947adeb8a16c14176293478caa9642a292b9ac not found: ID does not exist" containerID="8479fdd85b62a5ecf971c2f1e0947adeb8a16c14176293478caa9642a292b9ac"
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.779140 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8479fdd85b62a5ecf971c2f1e0947adeb8a16c14176293478caa9642a292b9ac"} err="failed to get container status \"8479fdd85b62a5ecf971c2f1e0947adeb8a16c14176293478caa9642a292b9ac\": rpc error: code = NotFound desc = could not find container \"8479fdd85b62a5ecf971c2f1e0947adeb8a16c14176293478caa9642a292b9ac\": container with ID starting with 8479fdd85b62a5ecf971c2f1e0947adeb8a16c14176293478caa9642a292b9ac not found: ID does not exist"
Jan 27 22:16:19 crc kubenswrapper[4793]: I0127 22:16:19.825135 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9518947-1a40-464c-872a-b8f1d5d56226" path="/var/lib/kubelet/pods/e9518947-1a40-464c-872a-b8f1d5d56226/volumes"
Jan 27 22:16:26 crc kubenswrapper[4793]: I0127 22:16:26.803838 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:16:26 crc kubenswrapper[4793]: E0127 22:16:26.805388 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:16:41 crc kubenswrapper[4793]: I0127 22:16:41.804221 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:16:41 crc kubenswrapper[4793]: E0127 22:16:41.805302 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:16:47 crc kubenswrapper[4793]: I0127 22:16:47.717462 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hgxpc"]
Jan 27 22:16:47 crc kubenswrapper[4793]: E0127 22:16:47.718672 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9518947-1a40-464c-872a-b8f1d5d56226" containerName="extract-utilities"
Jan 27 22:16:47 crc kubenswrapper[4793]: I0127 22:16:47.718687 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9518947-1a40-464c-872a-b8f1d5d56226" containerName="extract-utilities"
Jan 27 22:16:47 crc kubenswrapper[4793]: E0127 22:16:47.718714 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9518947-1a40-464c-872a-b8f1d5d56226" containerName="extract-content"
Jan 27 22:16:47 crc kubenswrapper[4793]: I0127 22:16:47.718720 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9518947-1a40-464c-872a-b8f1d5d56226" containerName="extract-content"
Jan 27 22:16:47 crc kubenswrapper[4793]: E0127 22:16:47.718748 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9518947-1a40-464c-872a-b8f1d5d56226" containerName="registry-server"
Jan 27 22:16:47 crc kubenswrapper[4793]: I0127 22:16:47.718756 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9518947-1a40-464c-872a-b8f1d5d56226" containerName="registry-server"
Jan 27 22:16:47 crc kubenswrapper[4793]: I0127 22:16:47.719005 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9518947-1a40-464c-872a-b8f1d5d56226" containerName="registry-server"
Jan 27 22:16:47 crc kubenswrapper[4793]: I0127 22:16:47.720623 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:16:47 crc kubenswrapper[4793]: I0127 22:16:47.734749 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hgxpc"]
Jan 27 22:16:47 crc kubenswrapper[4793]: I0127 22:16:47.739244 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2x7d\" (UniqueName: \"kubernetes.io/projected/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-kube-api-access-c2x7d\") pod \"redhat-operators-hgxpc\" (UID: \"9fc381fb-9389-44c4-9fc2-d94e7e2256ac\") " pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:16:47 crc kubenswrapper[4793]: I0127 22:16:47.739666 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-utilities\") pod \"redhat-operators-hgxpc\" (UID: \"9fc381fb-9389-44c4-9fc2-d94e7e2256ac\") " pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:16:47 crc kubenswrapper[4793]: I0127 22:16:47.740127 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-catalog-content\") pod \"redhat-operators-hgxpc\" (UID: \"9fc381fb-9389-44c4-9fc2-d94e7e2256ac\") " pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:16:47 crc kubenswrapper[4793]: I0127 22:16:47.842855 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-catalog-content\") pod \"redhat-operators-hgxpc\" (UID: \"9fc381fb-9389-44c4-9fc2-d94e7e2256ac\") " pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:16:47 crc kubenswrapper[4793]: I0127 22:16:47.848253 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2x7d\" (UniqueName: \"kubernetes.io/projected/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-kube-api-access-c2x7d\") pod \"redhat-operators-hgxpc\" (UID: \"9fc381fb-9389-44c4-9fc2-d94e7e2256ac\") " pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:16:47 crc kubenswrapper[4793]: I0127 22:16:47.849141 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-utilities\") pod \"redhat-operators-hgxpc\" (UID: \"9fc381fb-9389-44c4-9fc2-d94e7e2256ac\") " pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:16:47 crc kubenswrapper[4793]: I0127 22:16:47.843331 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-catalog-content\") pod \"redhat-operators-hgxpc\" (UID: \"9fc381fb-9389-44c4-9fc2-d94e7e2256ac\") " pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:16:47 crc kubenswrapper[4793]: I0127 22:16:47.850029 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-utilities\") pod \"redhat-operators-hgxpc\" (UID: \"9fc381fb-9389-44c4-9fc2-d94e7e2256ac\") " pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:16:47 crc kubenswrapper[4793]: I0127 22:16:47.874310 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2x7d\" (UniqueName: \"kubernetes.io/projected/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-kube-api-access-c2x7d\") pod \"redhat-operators-hgxpc\" (UID: \"9fc381fb-9389-44c4-9fc2-d94e7e2256ac\") " pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:16:48 crc kubenswrapper[4793]: I0127 22:16:48.052559 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:16:48 crc kubenswrapper[4793]: I0127 22:16:48.386162 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hgxpc"]
Jan 27 22:16:48 crc kubenswrapper[4793]: I0127 22:16:48.639763 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgxpc" event={"ID":"9fc381fb-9389-44c4-9fc2-d94e7e2256ac","Type":"ContainerStarted","Data":"26588c0c280e231a770b53dfbb8d5538d904dacef4daf405cc622f8f0d55138d"}
Jan 27 22:16:48 crc kubenswrapper[4793]: I0127 22:16:48.639816 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgxpc" event={"ID":"9fc381fb-9389-44c4-9fc2-d94e7e2256ac","Type":"ContainerStarted","Data":"dd9a8b038429a30f605d78fdcd4fd067ff64f09d6c2aa57943d9c46e87402c17"}
Jan 27 22:16:49 crc kubenswrapper[4793]: I0127 22:16:49.657243 4793 generic.go:334] "Generic (PLEG): container finished" podID="9fc381fb-9389-44c4-9fc2-d94e7e2256ac" containerID="26588c0c280e231a770b53dfbb8d5538d904dacef4daf405cc622f8f0d55138d" exitCode=0
Jan 27 22:16:49 crc kubenswrapper[4793]: I0127 22:16:49.657308 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgxpc" event={"ID":"9fc381fb-9389-44c4-9fc2-d94e7e2256ac","Type":"ContainerDied","Data":"26588c0c280e231a770b53dfbb8d5538d904dacef4daf405cc622f8f0d55138d"}
Jan 27 22:16:50 crc kubenswrapper[4793]: I0127 22:16:50.672775 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgxpc" event={"ID":"9fc381fb-9389-44c4-9fc2-d94e7e2256ac","Type":"ContainerStarted","Data":"7714469b66975d35c8030a454a1143f9befa6c18d980f009c5bb6e5be0b51d20"}
Jan 27 22:16:53 crc kubenswrapper[4793]: I0127 22:16:53.804177 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:16:53 crc kubenswrapper[4793]: E0127 22:16:53.805257 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:16:55 crc kubenswrapper[4793]: I0127 22:16:55.755225 4793 generic.go:334] "Generic (PLEG): container finished" podID="9fc381fb-9389-44c4-9fc2-d94e7e2256ac" containerID="7714469b66975d35c8030a454a1143f9befa6c18d980f009c5bb6e5be0b51d20" exitCode=0
Jan 27 22:16:55 crc kubenswrapper[4793]: I0127 22:16:55.755363 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgxpc" event={"ID":"9fc381fb-9389-44c4-9fc2-d94e7e2256ac","Type":"ContainerDied","Data":"7714469b66975d35c8030a454a1143f9befa6c18d980f009c5bb6e5be0b51d20"}
Jan 27 22:16:56 crc kubenswrapper[4793]: I0127 22:16:56.783120 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgxpc" event={"ID":"9fc381fb-9389-44c4-9fc2-d94e7e2256ac","Type":"ContainerStarted","Data":"f401ac4b886b596e00d28fd97008ec7546d75d149477699e78d234b696fae240"}
Jan 27 22:16:56 crc kubenswrapper[4793]: I0127 22:16:56.819298 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hgxpc" podStartSLOduration=3.337857731 podStartE2EDuration="9.819262608s" podCreationTimestamp="2026-01-27 22:16:47 +0000 UTC" firstStartedPulling="2026-01-27 22:16:49.666509242 +0000 UTC m=+8035.056762438" lastFinishedPulling="2026-01-27 22:16:56.147914119 +0000 UTC m=+8041.538167315" observedRunningTime="2026-01-27 22:16:56.81729608 +0000 UTC m=+8042.207549276" watchObservedRunningTime="2026-01-27 22:16:56.819262608 +0000 UTC m=+8042.209515804"
Jan 27 22:16:58 crc kubenswrapper[4793]: I0127 22:16:58.053176 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:16:58 crc kubenswrapper[4793]: I0127 22:16:58.053768 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:16:59 crc kubenswrapper[4793]: I0127 22:16:59.137205 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hgxpc" podUID="9fc381fb-9389-44c4-9fc2-d94e7e2256ac" containerName="registry-server" probeResult="failure" output=<
Jan 27 22:16:59 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s
Jan 27 22:16:59 crc kubenswrapper[4793]: >
Jan 27 22:17:08 crc kubenswrapper[4793]: I0127 22:17:08.806180 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:17:08 crc kubenswrapper[4793]: E0127 22:17:08.807331 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:17:09 crc kubenswrapper[4793]: I0127 22:17:09.120739 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hgxpc" podUID="9fc381fb-9389-44c4-9fc2-d94e7e2256ac" containerName="registry-server" probeResult="failure" output=<
Jan 27 22:17:09 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s
Jan 27 22:17:09 crc kubenswrapper[4793]: >
Jan 27 22:17:19 crc kubenswrapper[4793]: I0127 22:17:19.221061 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hgxpc" podUID="9fc381fb-9389-44c4-9fc2-d94e7e2256ac" containerName="registry-server" probeResult="failure" output=<
Jan 27 22:17:19 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s
Jan 27 22:17:19 crc kubenswrapper[4793]: >
Jan 27 22:17:20 crc kubenswrapper[4793]: I0127 22:17:20.803928 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:17:20 crc kubenswrapper[4793]: E0127 22:17:20.805084 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:17:28 crc kubenswrapper[4793]: I0127 22:17:28.126742 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:17:28 crc kubenswrapper[4793]: I0127 22:17:28.193296 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:17:28 crc kubenswrapper[4793]: I0127 22:17:28.373130 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hgxpc"]
Jan 27 22:17:29 crc kubenswrapper[4793]: I0127 22:17:29.445037 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hgxpc" podUID="9fc381fb-9389-44c4-9fc2-d94e7e2256ac" containerName="registry-server" containerID="cri-o://f401ac4b886b596e00d28fd97008ec7546d75d149477699e78d234b696fae240" gracePeriod=2
Jan 27 22:17:29 crc kubenswrapper[4793]: I0127 22:17:29.925439 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.120200 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-catalog-content\") pod \"9fc381fb-9389-44c4-9fc2-d94e7e2256ac\" (UID: \"9fc381fb-9389-44c4-9fc2-d94e7e2256ac\") "
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.120296 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2x7d\" (UniqueName: \"kubernetes.io/projected/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-kube-api-access-c2x7d\") pod \"9fc381fb-9389-44c4-9fc2-d94e7e2256ac\" (UID: \"9fc381fb-9389-44c4-9fc2-d94e7e2256ac\") "
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.120410 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-utilities\") pod \"9fc381fb-9389-44c4-9fc2-d94e7e2256ac\" (UID: \"9fc381fb-9389-44c4-9fc2-d94e7e2256ac\") "
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.122269 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-utilities" (OuterVolumeSpecName: "utilities") pod "9fc381fb-9389-44c4-9fc2-d94e7e2256ac" (UID: "9fc381fb-9389-44c4-9fc2-d94e7e2256ac"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.137924 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-kube-api-access-c2x7d" (OuterVolumeSpecName: "kube-api-access-c2x7d") pod "9fc381fb-9389-44c4-9fc2-d94e7e2256ac" (UID: "9fc381fb-9389-44c4-9fc2-d94e7e2256ac"). InnerVolumeSpecName "kube-api-access-c2x7d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.222562 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.222596 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2x7d\" (UniqueName: \"kubernetes.io/projected/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-kube-api-access-c2x7d\") on node \"crc\" DevicePath \"\""
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.274615 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9fc381fb-9389-44c4-9fc2-d94e7e2256ac" (UID: "9fc381fb-9389-44c4-9fc2-d94e7e2256ac"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.324611 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fc381fb-9389-44c4-9fc2-d94e7e2256ac-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.458688 4793 generic.go:334] "Generic (PLEG): container finished" podID="9fc381fb-9389-44c4-9fc2-d94e7e2256ac" containerID="f401ac4b886b596e00d28fd97008ec7546d75d149477699e78d234b696fae240" exitCode=0
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.458742 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgxpc" event={"ID":"9fc381fb-9389-44c4-9fc2-d94e7e2256ac","Type":"ContainerDied","Data":"f401ac4b886b596e00d28fd97008ec7546d75d149477699e78d234b696fae240"}
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.458773 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgxpc" event={"ID":"9fc381fb-9389-44c4-9fc2-d94e7e2256ac","Type":"ContainerDied","Data":"dd9a8b038429a30f605d78fdcd4fd067ff64f09d6c2aa57943d9c46e87402c17"}
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.458795 4793 scope.go:117] "RemoveContainer" containerID="f401ac4b886b596e00d28fd97008ec7546d75d149477699e78d234b696fae240"
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.458791 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hgxpc"
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.502278 4793 scope.go:117] "RemoveContainer" containerID="7714469b66975d35c8030a454a1143f9befa6c18d980f009c5bb6e5be0b51d20"
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.517591 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hgxpc"]
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.528398 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hgxpc"]
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.793967 4793 scope.go:117] "RemoveContainer" containerID="26588c0c280e231a770b53dfbb8d5538d904dacef4daf405cc622f8f0d55138d"
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.825187 4793 scope.go:117] "RemoveContainer" containerID="f401ac4b886b596e00d28fd97008ec7546d75d149477699e78d234b696fae240"
Jan 27 22:17:30 crc kubenswrapper[4793]: E0127 22:17:30.829086 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f401ac4b886b596e00d28fd97008ec7546d75d149477699e78d234b696fae240\": container with ID starting with f401ac4b886b596e00d28fd97008ec7546d75d149477699e78d234b696fae240 not found: ID does not exist" containerID="f401ac4b886b596e00d28fd97008ec7546d75d149477699e78d234b696fae240"
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.829135 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f401ac4b886b596e00d28fd97008ec7546d75d149477699e78d234b696fae240"} err="failed to get container status \"f401ac4b886b596e00d28fd97008ec7546d75d149477699e78d234b696fae240\": rpc error: code = NotFound desc = could not find container \"f401ac4b886b596e00d28fd97008ec7546d75d149477699e78d234b696fae240\": container with ID starting with f401ac4b886b596e00d28fd97008ec7546d75d149477699e78d234b696fae240 not found: ID does not exist"
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.829161 4793 scope.go:117] "RemoveContainer" containerID="7714469b66975d35c8030a454a1143f9befa6c18d980f009c5bb6e5be0b51d20"
Jan 27 22:17:30 crc kubenswrapper[4793]: E0127 22:17:30.829510 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7714469b66975d35c8030a454a1143f9befa6c18d980f009c5bb6e5be0b51d20\": container with ID starting with 7714469b66975d35c8030a454a1143f9befa6c18d980f009c5bb6e5be0b51d20 not found: ID does not exist" containerID="7714469b66975d35c8030a454a1143f9befa6c18d980f009c5bb6e5be0b51d20"
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.829529 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7714469b66975d35c8030a454a1143f9befa6c18d980f009c5bb6e5be0b51d20"} err="failed to get container status \"7714469b66975d35c8030a454a1143f9befa6c18d980f009c5bb6e5be0b51d20\": rpc error: code = NotFound desc = could not find container \"7714469b66975d35c8030a454a1143f9befa6c18d980f009c5bb6e5be0b51d20\": container with ID starting with 7714469b66975d35c8030a454a1143f9befa6c18d980f009c5bb6e5be0b51d20 not found: ID does not exist"
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.829557 4793 scope.go:117] "RemoveContainer" containerID="26588c0c280e231a770b53dfbb8d5538d904dacef4daf405cc622f8f0d55138d"
Jan 27 22:17:30 crc kubenswrapper[4793]: E0127 22:17:30.831282 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26588c0c280e231a770b53dfbb8d5538d904dacef4daf405cc622f8f0d55138d\": container with ID starting with 26588c0c280e231a770b53dfbb8d5538d904dacef4daf405cc622f8f0d55138d not found: ID does not exist" containerID="26588c0c280e231a770b53dfbb8d5538d904dacef4daf405cc622f8f0d55138d"
Jan 27 22:17:30 crc kubenswrapper[4793]: I0127 22:17:30.831317 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26588c0c280e231a770b53dfbb8d5538d904dacef4daf405cc622f8f0d55138d"} err="failed to get container status \"26588c0c280e231a770b53dfbb8d5538d904dacef4daf405cc622f8f0d55138d\": rpc error: code = NotFound desc = could not find container \"26588c0c280e231a770b53dfbb8d5538d904dacef4daf405cc622f8f0d55138d\": container with ID starting with 26588c0c280e231a770b53dfbb8d5538d904dacef4daf405cc622f8f0d55138d not found: ID does not exist"
Jan 27 22:17:31 crc kubenswrapper[4793]: I0127 22:17:31.819124 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9fc381fb-9389-44c4-9fc2-d94e7e2256ac" path="/var/lib/kubelet/pods/9fc381fb-9389-44c4-9fc2-d94e7e2256ac/volumes"
Jan 27 22:17:32 crc kubenswrapper[4793]: I0127 22:17:32.803672 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:17:32 crc kubenswrapper[4793]: E0127 22:17:32.804416 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:17:47 crc kubenswrapper[4793]: I0127 22:17:47.804488 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:17:47 crc kubenswrapper[4793]: E0127 22:17:47.805680 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:17:52 crc kubenswrapper[4793]: I0127 22:17:52.753284 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 22:17:52 crc kubenswrapper[4793]: I0127 22:17:52.754008 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 22:17:58 crc kubenswrapper[4793]: I0127 22:17:58.803812 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:17:58 crc kubenswrapper[4793]: E0127 22:17:58.804682 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.115204 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vhf5p"]
Jan 27 22:18:08 crc kubenswrapper[4793]: E0127 22:18:08.116692 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fc381fb-9389-44c4-9fc2-d94e7e2256ac" containerName="extract-content"
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.116711 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fc381fb-9389-44c4-9fc2-d94e7e2256ac" containerName="extract-content"
Jan 27 22:18:08 crc kubenswrapper[4793]: E0127 22:18:08.116756 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fc381fb-9389-44c4-9fc2-d94e7e2256ac" containerName="extract-utilities"
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.116764 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fc381fb-9389-44c4-9fc2-d94e7e2256ac" containerName="extract-utilities"
Jan 27 22:18:08 crc kubenswrapper[4793]: E0127 22:18:08.116794 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fc381fb-9389-44c4-9fc2-d94e7e2256ac" containerName="registry-server"
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.116801 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fc381fb-9389-44c4-9fc2-d94e7e2256ac" containerName="registry-server"
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.117045 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="9fc381fb-9389-44c4-9fc2-d94e7e2256ac" containerName="registry-server"
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.119079 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.126151 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vhf5p"]
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.265590 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/967609bb-fe06-4539-82c4-969889e4d12e-utilities\") pod \"certified-operators-vhf5p\" (UID: \"967609bb-fe06-4539-82c4-969889e4d12e\") " pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.266097 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/967609bb-fe06-4539-82c4-969889e4d12e-catalog-content\") pod \"certified-operators-vhf5p\" (UID: \"967609bb-fe06-4539-82c4-969889e4d12e\") " pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.266398 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4f5p\" (UniqueName: \"kubernetes.io/projected/967609bb-fe06-4539-82c4-969889e4d12e-kube-api-access-h4f5p\") pod \"certified-operators-vhf5p\" (UID: \"967609bb-fe06-4539-82c4-969889e4d12e\") " pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.368442 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/967609bb-fe06-4539-82c4-969889e4d12e-utilities\") pod \"certified-operators-vhf5p\" (UID: \"967609bb-fe06-4539-82c4-969889e4d12e\") " pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.368966 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/967609bb-fe06-4539-82c4-969889e4d12e-utilities\") pod \"certified-operators-vhf5p\" (UID: \"967609bb-fe06-4539-82c4-969889e4d12e\") " pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.369366 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/967609bb-fe06-4539-82c4-969889e4d12e-catalog-content\") pod \"certified-operators-vhf5p\" (UID: \"967609bb-fe06-4539-82c4-969889e4d12e\") " pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.369465 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4f5p\" (UniqueName: \"kubernetes.io/projected/967609bb-fe06-4539-82c4-969889e4d12e-kube-api-access-h4f5p\") pod \"certified-operators-vhf5p\" (UID: \"967609bb-fe06-4539-82c4-969889e4d12e\") " pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.369671 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/967609bb-fe06-4539-82c4-969889e4d12e-catalog-content\") pod \"certified-operators-vhf5p\" (UID: \"967609bb-fe06-4539-82c4-969889e4d12e\") " pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.402245 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4f5p\" (UniqueName: \"kubernetes.io/projected/967609bb-fe06-4539-82c4-969889e4d12e-kube-api-access-h4f5p\") pod \"certified-operators-vhf5p\" (UID: \"967609bb-fe06-4539-82c4-969889e4d12e\") " pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:08 crc kubenswrapper[4793]: I0127 22:18:08.446816 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:09 crc kubenswrapper[4793]: I0127 22:18:09.025922 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vhf5p"]
Jan 27 22:18:09 crc kubenswrapper[4793]: I0127 22:18:09.951972 4793 generic.go:334] "Generic (PLEG): container finished" podID="967609bb-fe06-4539-82c4-969889e4d12e" containerID="d02717c34adfaf92a7803c7d2f667aa3a529cc26525ef78ba8b2d99e100ee4b4" exitCode=0
Jan 27 22:18:09 crc kubenswrapper[4793]: I0127 22:18:09.952091 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vhf5p" event={"ID":"967609bb-fe06-4539-82c4-969889e4d12e","Type":"ContainerDied","Data":"d02717c34adfaf92a7803c7d2f667aa3a529cc26525ef78ba8b2d99e100ee4b4"}
Jan 27 22:18:09 crc kubenswrapper[4793]: I0127 22:18:09.952211 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vhf5p" event={"ID":"967609bb-fe06-4539-82c4-969889e4d12e","Type":"ContainerStarted","Data":"da185438a113e38610927ea6a5849615ed6d66ee4b74b4bd079790712f940923"}
Jan 27 22:18:09 crc kubenswrapper[4793]: I0127 22:18:09.958774 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 27 22:18:10 crc kubenswrapper[4793]: I0127 22:18:10.805156 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:18:10 crc kubenswrapper[4793]: E0127 22:18:10.807041 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:18:11 crc kubenswrapper[4793]: I0127 22:18:11.989003 4793 generic.go:334] "Generic (PLEG): container finished" podID="967609bb-fe06-4539-82c4-969889e4d12e" containerID="7c6c6253b0f2fbd47c65817d28145cd659520a44299fcac16c361dddd22b5749" exitCode=0
Jan 27 22:18:11 crc kubenswrapper[4793]: I0127 22:18:11.989059 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vhf5p" event={"ID":"967609bb-fe06-4539-82c4-969889e4d12e","Type":"ContainerDied","Data":"7c6c6253b0f2fbd47c65817d28145cd659520a44299fcac16c361dddd22b5749"}
Jan 27 22:18:13 crc kubenswrapper[4793]: I0127 22:18:13.002769 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vhf5p" event={"ID":"967609bb-fe06-4539-82c4-969889e4d12e","Type":"ContainerStarted","Data":"b205607379fa1cccbc207660df095492a3a660052ce943d6e82a2db1345f47fa"}
Jan 27 22:18:13 crc kubenswrapper[4793]: I0127 22:18:13.034529 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vhf5p" podStartSLOduration=2.350318875 podStartE2EDuration="5.034508581s" podCreationTimestamp="2026-01-27 22:18:08 +0000 UTC" firstStartedPulling="2026-01-27 22:18:09.958137458 +0000 UTC m=+8115.348390654" lastFinishedPulling="2026-01-27 22:18:12.642327164 +0000 UTC m=+8118.032580360" observedRunningTime="2026-01-27 22:18:13.025490353 +0000 UTC m=+8118.415743509" watchObservedRunningTime="2026-01-27 22:18:13.034508581 +0000 UTC m=+8118.424761737"
Jan 27 22:18:18 crc kubenswrapper[4793]: I0127 22:18:18.447249 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:18 crc kubenswrapper[4793]: I0127 22:18:18.447898 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:18 crc kubenswrapper[4793]: I0127 22:18:18.524083 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:19 crc kubenswrapper[4793]: I0127 22:18:19.168476 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:21 crc kubenswrapper[4793]: I0127 22:18:21.918935 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vhf5p"]
Jan 27 22:18:21 crc kubenswrapper[4793]: I0127 22:18:21.919778 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vhf5p" podUID="967609bb-fe06-4539-82c4-969889e4d12e" containerName="registry-server" containerID="cri-o://b205607379fa1cccbc207660df095492a3a660052ce943d6e82a2db1345f47fa" gracePeriod=2
Jan 27 22:18:22 crc kubenswrapper[4793]: I0127 22:18:22.131451 4793 generic.go:334] "Generic (PLEG): container finished" podID="967609bb-fe06-4539-82c4-969889e4d12e" containerID="b205607379fa1cccbc207660df095492a3a660052ce943d6e82a2db1345f47fa" exitCode=0
Jan 27 22:18:22 crc kubenswrapper[4793]: I0127 22:18:22.131799 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vhf5p" event={"ID":"967609bb-fe06-4539-82c4-969889e4d12e","Type":"ContainerDied","Data":"b205607379fa1cccbc207660df095492a3a660052ce943d6e82a2db1345f47fa"}
Jan 27 22:18:22 crc kubenswrapper[4793]: I0127 22:18:22.494431 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:22 crc kubenswrapper[4793]: I0127 22:18:22.581227 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/967609bb-fe06-4539-82c4-969889e4d12e-utilities\") pod \"967609bb-fe06-4539-82c4-969889e4d12e\" (UID: \"967609bb-fe06-4539-82c4-969889e4d12e\") "
Jan 27 22:18:22 crc kubenswrapper[4793]: I0127 22:18:22.581325 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4f5p\" (UniqueName: \"kubernetes.io/projected/967609bb-fe06-4539-82c4-969889e4d12e-kube-api-access-h4f5p\") pod \"967609bb-fe06-4539-82c4-969889e4d12e\" (UID: \"967609bb-fe06-4539-82c4-969889e4d12e\") "
Jan 27 22:18:22 crc kubenswrapper[4793]: I0127 22:18:22.582120 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/967609bb-fe06-4539-82c4-969889e4d12e-catalog-content\") pod \"967609bb-fe06-4539-82c4-969889e4d12e\" (UID: \"967609bb-fe06-4539-82c4-969889e4d12e\") "
Jan 27 22:18:22 crc kubenswrapper[4793]: I0127 22:18:22.583898 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/967609bb-fe06-4539-82c4-969889e4d12e-utilities" (OuterVolumeSpecName: "utilities") pod "967609bb-fe06-4539-82c4-969889e4d12e" (UID: "967609bb-fe06-4539-82c4-969889e4d12e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 22:18:22 crc kubenswrapper[4793]: I0127 22:18:22.596786 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/967609bb-fe06-4539-82c4-969889e4d12e-kube-api-access-h4f5p" (OuterVolumeSpecName: "kube-api-access-h4f5p") pod "967609bb-fe06-4539-82c4-969889e4d12e" (UID: "967609bb-fe06-4539-82c4-969889e4d12e"). InnerVolumeSpecName "kube-api-access-h4f5p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 22:18:22 crc kubenswrapper[4793]: I0127 22:18:22.632583 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/967609bb-fe06-4539-82c4-969889e4d12e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "967609bb-fe06-4539-82c4-969889e4d12e" (UID: "967609bb-fe06-4539-82c4-969889e4d12e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 22:18:22 crc kubenswrapper[4793]: I0127 22:18:22.686594 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/967609bb-fe06-4539-82c4-969889e4d12e-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 22:18:22 crc kubenswrapper[4793]: I0127 22:18:22.686646 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4f5p\" (UniqueName: \"kubernetes.io/projected/967609bb-fe06-4539-82c4-969889e4d12e-kube-api-access-h4f5p\") on node \"crc\" DevicePath \"\""
Jan 27 22:18:22 crc kubenswrapper[4793]: I0127 22:18:22.686661 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/967609bb-fe06-4539-82c4-969889e4d12e-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 22:18:22 crc kubenswrapper[4793]: I0127 22:18:22.753921 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 22:18:22 crc kubenswrapper[4793]: I0127 22:18:22.753995 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 22:18:22 crc kubenswrapper[4793]: I0127 22:18:22.803697 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:18:22 crc kubenswrapper[4793]: E0127 22:18:22.804040 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:18:23 crc kubenswrapper[4793]: I0127 22:18:23.223410 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vhf5p" event={"ID":"967609bb-fe06-4539-82c4-969889e4d12e","Type":"ContainerDied","Data":"da185438a113e38610927ea6a5849615ed6d66ee4b74b4bd079790712f940923"}
Jan 27 22:18:23 crc kubenswrapper[4793]: I0127 22:18:23.223735 4793 scope.go:117] "RemoveContainer" containerID="b205607379fa1cccbc207660df095492a3a660052ce943d6e82a2db1345f47fa"
Jan 27 22:18:23 crc kubenswrapper[4793]: I0127 22:18:23.223674 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vhf5p"
Jan 27 22:18:23 crc kubenswrapper[4793]: I0127 22:18:23.275269 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vhf5p"]
Jan 27 22:18:23 crc kubenswrapper[4793]: I0127 22:18:23.276802 4793 scope.go:117] "RemoveContainer" containerID="7c6c6253b0f2fbd47c65817d28145cd659520a44299fcac16c361dddd22b5749"
Jan 27 22:18:23 crc kubenswrapper[4793]: I0127 22:18:23.293128 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vhf5p"]
Jan 27 22:18:23 crc kubenswrapper[4793]: I0127 22:18:23.302739 4793 scope.go:117] "RemoveContainer" containerID="d02717c34adfaf92a7803c7d2f667aa3a529cc26525ef78ba8b2d99e100ee4b4"
Jan 27 22:18:23 crc kubenswrapper[4793]: I0127 22:18:23.823862 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="967609bb-fe06-4539-82c4-969889e4d12e" path="/var/lib/kubelet/pods/967609bb-fe06-4539-82c4-969889e4d12e/volumes"
Jan 27 22:18:35 crc kubenswrapper[4793]: I0127 22:18:35.809763 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:18:35 crc kubenswrapper[4793]: E0127 22:18:35.810533 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:18:47 crc kubenswrapper[4793]: I0127 22:18:47.803979 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:18:47 crc kubenswrapper[4793]: E0127 22:18:47.805029 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:18:52 crc kubenswrapper[4793]: I0127 22:18:52.753704 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 22:18:52 crc kubenswrapper[4793]: I0127 22:18:52.754430 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 22:18:52 crc kubenswrapper[4793]: I0127 22:18:52.754498 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn"
Jan 27 22:18:52 crc kubenswrapper[4793]: I0127 22:18:52.755600 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 27 22:18:52 crc kubenswrapper[4793]: I0127 22:18:52.755685 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" gracePeriod=600
Jan 27 22:18:52 crc kubenswrapper[4793]: E0127 22:18:52.926143 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:18:53 crc kubenswrapper[4793]: I0127 22:18:53.643342 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" exitCode=0
Jan 27 22:18:53 crc kubenswrapper[4793]: I0127 22:18:53.643405 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120"}
Jan 27 22:18:53 crc kubenswrapper[4793]: I0127 22:18:53.643450 4793 scope.go:117] "RemoveContainer" containerID="a722d8d749a9f7e0415ea5ba745c98ad7b557ce38a84931f7e836a29d1498437"
Jan 27 22:18:53 crc kubenswrapper[4793]: I0127 22:18:53.644694 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120"
Jan 27 22:18:53 crc kubenswrapper[4793]: E0127 22:18:53.645233 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:18:59 crc kubenswrapper[4793]: I0127 22:18:59.803923 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:18:59 crc kubenswrapper[4793]: E0127 22:18:59.806332 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:19:04 crc kubenswrapper[4793]: I0127 22:19:04.804999 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120"
Jan 27 22:19:04 crc kubenswrapper[4793]: E0127 22:19:04.807933 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:19:12 crc kubenswrapper[4793]: I0127 22:19:12.804669 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:19:12 crc kubenswrapper[4793]: E0127 22:19:12.806304 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:19:15 crc kubenswrapper[4793]: I0127 22:19:15.819259 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120"
Jan 27 22:19:15 crc kubenswrapper[4793]: E0127 22:19:15.820405 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:19:25 crc kubenswrapper[4793]: I0127 22:19:25.832469 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:19:25 crc kubenswrapper[4793]: E0127 22:19:25.834775 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:19:29 crc kubenswrapper[4793]: I0127 22:19:29.803767 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120"
Jan 27 22:19:29 crc kubenswrapper[4793]: E0127 22:19:29.804576 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:19:40 crc kubenswrapper[4793]: I0127 22:19:40.804514 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:19:40 crc kubenswrapper[4793]: E0127 22:19:40.805944 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:19:42 crc kubenswrapper[4793]: I0127 22:19:42.804464 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120"
Jan 27 22:19:42 crc kubenswrapper[4793]: E0127 22:19:42.805227 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:19:53 crc kubenswrapper[4793]: I0127 22:19:53.808373 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:19:53 crc kubenswrapper[4793]: E0127 22:19:53.809298 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:19:54 crc kubenswrapper[4793]: I0127 22:19:54.802830 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120"
Jan 27 22:19:54 crc kubenswrapper[4793]: E0127 22:19:54.803140 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:20:07 crc kubenswrapper[4793]: I0127 22:20:07.804027 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:20:07 crc kubenswrapper[4793]: E0127 22:20:07.804844 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:20:09 crc kubenswrapper[4793]: I0127 22:20:09.804254 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120"
Jan 27 22:20:09 crc kubenswrapper[4793]: E0127 22:20:09.804887 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:20:20 crc kubenswrapper[4793]: I0127 22:20:20.803838 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:20:20 crc kubenswrapper[4793]: E0127 22:20:20.805172 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:20:23 crc kubenswrapper[4793]: I0127 22:20:23.804199 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120"
Jan 27 22:20:23 crc kubenswrapper[4793]: E0127 22:20:23.804890 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:20:33 crc kubenswrapper[4793]: I0127 22:20:33.804090 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:20:33 crc kubenswrapper[4793]: E0127 22:20:33.805121 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:20:34 crc kubenswrapper[4793]: I0127 22:20:34.803059 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120"
Jan 27 22:20:34 crc kubenswrapper[4793]: E0127 22:20:34.803719 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:20:46 crc kubenswrapper[4793]: I0127 22:20:46.803020 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:20:48 crc kubenswrapper[4793]: I0127 22:20:48.075428 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9"}
Jan 27 22:20:48 crc kubenswrapper[4793]: I0127 22:20:48.242726 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 27 22:20:48 crc kubenswrapper[4793]: I0127 22:20:48.242960 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 22:20:48 crc kubenswrapper[4793]: I0127 22:20:48.295158 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0"
Jan 27 22:20:48 crc kubenswrapper[4793]: I0127 22:20:48.804134 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120"
Jan 27 22:20:48 crc kubenswrapper[4793]: E0127 22:20:48.804989 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:20:49 crc kubenswrapper[4793]: I0127 22:20:49.140038 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0"
Jan 27 22:20:50 crc kubenswrapper[4793]: I0127 22:20:50.101952 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" exitCode=1
Jan 27 22:20:50 crc kubenswrapper[4793]: I0127 22:20:50.102044 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9"}
Jan 27 22:20:50 crc kubenswrapper[4793]: I0127 22:20:50.102155 4793 scope.go:117] "RemoveContainer" containerID="baa43b733747bc8671d8cb2241e102e6d310714369fc2acb80eb22fb1d18b625"
Jan 27 22:20:50 crc kubenswrapper[4793]: I0127 22:20:50.102973 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9"
Jan 27 22:20:50 crc kubenswrapper[4793]: E0127 22:20:50.103460 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:20:51 crc kubenswrapper[4793]: I0127 22:20:51.120716 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9"
Jan 27 22:20:51 crc kubenswrapper[4793]: E0127 22:20:51.121891 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:20:53 crc kubenswrapper[4793]: I0127 22:20:53.243007 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 27 22:20:53 crc kubenswrapper[4793]: I0127 22:20:53.244372 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9"
Jan 27 22:20:53 crc kubenswrapper[4793]: E0127 22:20:53.244670 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:20:58 crc kubenswrapper[4793]: I0127 22:20:58.243249 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 22:20:58 crc kubenswrapper[4793]: I0127 22:20:58.243842 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 22:20:58 crc kubenswrapper[4793]: I0127 22:20:58.244846 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9"
Jan 27 22:20:58 crc kubenswrapper[4793]: E0127 22:20:58.245264 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff:
\"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:21:01 crc kubenswrapper[4793]: I0127 22:21:01.803896 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" Jan 27 22:21:01 crc kubenswrapper[4793]: E0127 22:21:01.804671 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:21:08 crc kubenswrapper[4793]: I0127 22:21:08.880017 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:21:08 crc kubenswrapper[4793]: E0127 22:21:08.881027 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:21:13 crc kubenswrapper[4793]: I0127 22:21:13.803950 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" Jan 27 22:21:13 crc kubenswrapper[4793]: E0127 22:21:13.805205 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:21:22 crc kubenswrapper[4793]: I0127 22:21:22.803109 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:21:22 crc kubenswrapper[4793]: E0127 22:21:22.803957 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:21:25 crc kubenswrapper[4793]: I0127 22:21:25.818852 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" Jan 27 22:21:25 crc kubenswrapper[4793]: E0127 22:21:25.820652 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:21:33 crc kubenswrapper[4793]: I0127 22:21:33.804220 4793 scope.go:117] "RemoveContainer" 
containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:21:33 crc kubenswrapper[4793]: E0127 22:21:33.805488 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:21:37 crc kubenswrapper[4793]: I0127 22:21:37.804581 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" Jan 27 22:21:37 crc kubenswrapper[4793]: E0127 22:21:37.805382 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:21:44 crc kubenswrapper[4793]: I0127 22:21:44.805829 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:21:44 crc kubenswrapper[4793]: E0127 22:21:44.806625 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:21:52 crc kubenswrapper[4793]: I0127 22:21:52.804171 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" Jan 27 22:21:52 crc kubenswrapper[4793]: E0127 22:21:52.805254 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:21:58 crc kubenswrapper[4793]: I0127 22:21:58.804128 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:21:58 crc kubenswrapper[4793]: E0127 22:21:58.805749 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:22:06 crc kubenswrapper[4793]: I0127 22:22:06.805206 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" Jan 27 22:22:06 crc kubenswrapper[4793]: E0127 22:22:06.806504 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:22:10 crc kubenswrapper[4793]: I0127 22:22:10.804321 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:22:10 crc kubenswrapper[4793]: E0127 22:22:10.805513 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:22:21 crc kubenswrapper[4793]: I0127 22:22:21.803453 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" Jan 27 22:22:21 crc kubenswrapper[4793]: E0127 22:22:21.804251 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:22:22 crc kubenswrapper[4793]: I0127 22:22:22.804999 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:22:22 crc kubenswrapper[4793]: E0127 22:22:22.805681 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:22:33 crc kubenswrapper[4793]: I0127 22:22:33.882841 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:22:33 crc kubenswrapper[4793]: E0127 22:22:33.887021 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:22:35 crc kubenswrapper[4793]: I0127 22:22:35.817027 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" Jan 27 22:22:35 crc kubenswrapper[4793]: E0127 22:22:35.817482 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:22:46 crc kubenswrapper[4793]: I0127 22:22:46.803938 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:22:46 crc 
kubenswrapper[4793]: E0127 22:22:46.805197 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:22:49 crc kubenswrapper[4793]: I0127 22:22:49.804125 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" Jan 27 22:22:49 crc kubenswrapper[4793]: E0127 22:22:49.804977 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:23:00 crc kubenswrapper[4793]: I0127 22:23:00.804352 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:23:00 crc kubenswrapper[4793]: E0127 22:23:00.805495 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:23:03 crc kubenswrapper[4793]: I0127 22:23:03.805464 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" Jan 27 22:23:03 crc kubenswrapper[4793]: E0127 22:23:03.807302 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:23:08 crc kubenswrapper[4793]: I0127 22:23:08.833747 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nqn7b"] Jan 27 22:23:08 crc kubenswrapper[4793]: E0127 22:23:08.834605 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="967609bb-fe06-4539-82c4-969889e4d12e" containerName="extract-content" Jan 27 22:23:08 crc kubenswrapper[4793]: I0127 22:23:08.834618 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="967609bb-fe06-4539-82c4-969889e4d12e" containerName="extract-content" Jan 27 22:23:08 crc kubenswrapper[4793]: E0127 22:23:08.834658 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="967609bb-fe06-4539-82c4-969889e4d12e" containerName="registry-server" Jan 27 22:23:08 crc kubenswrapper[4793]: I0127 22:23:08.834664 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="967609bb-fe06-4539-82c4-969889e4d12e" containerName="registry-server" Jan 27 22:23:08 crc kubenswrapper[4793]: E0127 22:23:08.834676 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="967609bb-fe06-4539-82c4-969889e4d12e" containerName="extract-utilities" Jan 27 22:23:08 crc kubenswrapper[4793]: I0127 
22:23:08.834682 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="967609bb-fe06-4539-82c4-969889e4d12e" containerName="extract-utilities" Jan 27 22:23:08 crc kubenswrapper[4793]: I0127 22:23:08.834892 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="967609bb-fe06-4539-82c4-969889e4d12e" containerName="registry-server" Jan 27 22:23:08 crc kubenswrapper[4793]: I0127 22:23:08.840965 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:08 crc kubenswrapper[4793]: I0127 22:23:08.866868 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-catalog-content\") pod \"redhat-marketplace-nqn7b\" (UID: \"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850\") " pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:08 crc kubenswrapper[4793]: I0127 22:23:08.866946 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-utilities\") pod \"redhat-marketplace-nqn7b\" (UID: \"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850\") " pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:08 crc kubenswrapper[4793]: I0127 22:23:08.870751 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h72zr\" (UniqueName: \"kubernetes.io/projected/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-kube-api-access-h72zr\") pod \"redhat-marketplace-nqn7b\" (UID: \"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850\") " pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:08 crc kubenswrapper[4793]: I0127 22:23:08.879179 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nqn7b"] Jan 27 22:23:08 crc kubenswrapper[4793]: I0127 22:23:08.989002 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h72zr\" (UniqueName: \"kubernetes.io/projected/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-kube-api-access-h72zr\") pod \"redhat-marketplace-nqn7b\" (UID: \"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850\") " pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:08 crc kubenswrapper[4793]: I0127 22:23:08.989120 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-catalog-content\") pod \"redhat-marketplace-nqn7b\" (UID: \"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850\") " pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:08 crc kubenswrapper[4793]: I0127 22:23:08.989167 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-utilities\") pod \"redhat-marketplace-nqn7b\" (UID: \"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850\") " pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:08 crc kubenswrapper[4793]: I0127 22:23:08.989712 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-utilities\") pod \"redhat-marketplace-nqn7b\" (UID: \"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850\") " pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:08 crc 
kubenswrapper[4793]: I0127 22:23:08.990036 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-catalog-content\") pod \"redhat-marketplace-nqn7b\" (UID: \"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850\") " pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:09 crc kubenswrapper[4793]: I0127 22:23:09.010066 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h72zr\" (UniqueName: \"kubernetes.io/projected/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-kube-api-access-h72zr\") pod \"redhat-marketplace-nqn7b\" (UID: \"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850\") " pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:09 crc kubenswrapper[4793]: I0127 22:23:09.159054 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:09 crc kubenswrapper[4793]: I0127 22:23:09.715998 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nqn7b"] Jan 27 22:23:10 crc kubenswrapper[4793]: I0127 22:23:10.314737 4793 generic.go:334] "Generic (PLEG): container finished" podID="2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850" containerID="e56d5d2704a25839bfbab36549eb100d9b0d70bba17d11191edf890e9aa7b422" exitCode=0 Jan 27 22:23:10 crc kubenswrapper[4793]: I0127 22:23:10.314842 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nqn7b" event={"ID":"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850","Type":"ContainerDied","Data":"e56d5d2704a25839bfbab36549eb100d9b0d70bba17d11191edf890e9aa7b422"} Jan 27 22:23:10 crc kubenswrapper[4793]: I0127 22:23:10.315205 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nqn7b" event={"ID":"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850","Type":"ContainerStarted","Data":"39acdd944cc54e3f7a34f8768c1b0a3740546db65dd8f3e3613a3b34b8fc478f"} Jan 27 22:23:10 crc kubenswrapper[4793]: I0127 22:23:10.316856 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 22:23:12 crc kubenswrapper[4793]: I0127 22:23:12.334558 4793 generic.go:334] "Generic (PLEG): container finished" podID="2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850" containerID="9c2b4c61ac078c21b7d6aa6e7368795a536fa56bbf2b448d5262a9eb21b31f07" exitCode=0 Jan 27 22:23:12 crc kubenswrapper[4793]: I0127 22:23:12.334633 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nqn7b" event={"ID":"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850","Type":"ContainerDied","Data":"9c2b4c61ac078c21b7d6aa6e7368795a536fa56bbf2b448d5262a9eb21b31f07"} Jan 27 22:23:13 crc kubenswrapper[4793]: I0127 22:23:13.392011 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nqn7b" event={"ID":"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850","Type":"ContainerStarted","Data":"ebd520ca00a03033c68890b1d6e7da6e2cdc537e6eff460e7746d5b2483118f6"} Jan 27 22:23:14 crc kubenswrapper[4793]: I0127 22:23:14.804244 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:23:14 crc kubenswrapper[4793]: E0127 22:23:14.804984 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier 
pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:23:18 crc kubenswrapper[4793]: I0127 22:23:18.887204 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" Jan 27 22:23:18 crc kubenswrapper[4793]: E0127 22:23:18.887876 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:23:19 crc kubenswrapper[4793]: I0127 22:23:19.159498 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:19 crc kubenswrapper[4793]: I0127 22:23:19.159778 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:19 crc kubenswrapper[4793]: I0127 22:23:19.202172 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:19 crc kubenswrapper[4793]: I0127 22:23:19.221075 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nqn7b" podStartSLOduration=8.801857872 podStartE2EDuration="11.221057666s" podCreationTimestamp="2026-01-27 22:23:08 +0000 UTC" firstStartedPulling="2026-01-27 22:23:10.316532081 +0000 UTC m=+8415.706785247" lastFinishedPulling="2026-01-27 22:23:12.735731884 +0000 UTC m=+8418.125985041" observedRunningTime="2026-01-27 22:23:13.426481331 +0000 UTC m=+8418.816734487" watchObservedRunningTime="2026-01-27 22:23:19.221057666 +0000 UTC m=+8424.611310832" Jan 27 22:23:19 crc kubenswrapper[4793]: I0127 22:23:19.564746 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:19 crc kubenswrapper[4793]: I0127 22:23:19.625647 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nqn7b"] Jan 27 22:23:21 crc kubenswrapper[4793]: I0127 22:23:21.494864 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nqn7b" podUID="2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850" containerName="registry-server" containerID="cri-o://ebd520ca00a03033c68890b1d6e7da6e2cdc537e6eff460e7746d5b2483118f6" gracePeriod=2 Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.040436 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.162341 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-catalog-content\") pod \"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850\" (UID: \"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850\") " Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.162741 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h72zr\" (UniqueName: \"kubernetes.io/projected/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-kube-api-access-h72zr\") pod \"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850\" (UID: \"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850\") " Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.163062 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-utilities\") pod \"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850\" (UID: \"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850\") " Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.164103 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-utilities" (OuterVolumeSpecName: "utilities") pod "2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850" (UID: "2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.173933 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-kube-api-access-h72zr" (OuterVolumeSpecName: "kube-api-access-h72zr") pod "2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850" (UID: "2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850"). InnerVolumeSpecName "kube-api-access-h72zr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.187576 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850" (UID: "2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.267145 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.267216 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.267233 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h72zr\" (UniqueName: \"kubernetes.io/projected/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850-kube-api-access-h72zr\") on node \"crc\" DevicePath \"\"" Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.510188 4793 generic.go:334] "Generic (PLEG): container finished" podID="2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850" containerID="ebd520ca00a03033c68890b1d6e7da6e2cdc537e6eff460e7746d5b2483118f6" exitCode=0 Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.510251 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nqn7b" event={"ID":"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850","Type":"ContainerDied","Data":"ebd520ca00a03033c68890b1d6e7da6e2cdc537e6eff460e7746d5b2483118f6"} Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.510709 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nqn7b" event={"ID":"2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850","Type":"ContainerDied","Data":"39acdd944cc54e3f7a34f8768c1b0a3740546db65dd8f3e3613a3b34b8fc478f"} Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.510739 4793 scope.go:117] "RemoveContainer" containerID="ebd520ca00a03033c68890b1d6e7da6e2cdc537e6eff460e7746d5b2483118f6" Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.510287 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nqn7b" Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.545595 4793 scope.go:117] "RemoveContainer" containerID="9c2b4c61ac078c21b7d6aa6e7368795a536fa56bbf2b448d5262a9eb21b31f07" Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.567783 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nqn7b"] Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.569305 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nqn7b"] Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.574897 4793 scope.go:117] "RemoveContainer" containerID="e56d5d2704a25839bfbab36549eb100d9b0d70bba17d11191edf890e9aa7b422" Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.644064 4793 scope.go:117] "RemoveContainer" containerID="ebd520ca00a03033c68890b1d6e7da6e2cdc537e6eff460e7746d5b2483118f6" Jan 27 22:23:22 crc kubenswrapper[4793]: E0127 22:23:22.663808 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebd520ca00a03033c68890b1d6e7da6e2cdc537e6eff460e7746d5b2483118f6\": container with ID starting with ebd520ca00a03033c68890b1d6e7da6e2cdc537e6eff460e7746d5b2483118f6 not found: ID does not exist" containerID="ebd520ca00a03033c68890b1d6e7da6e2cdc537e6eff460e7746d5b2483118f6" Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.663874 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebd520ca00a03033c68890b1d6e7da6e2cdc537e6eff460e7746d5b2483118f6"} err="failed to get container status \"ebd520ca00a03033c68890b1d6e7da6e2cdc537e6eff460e7746d5b2483118f6\": rpc error: code = NotFound desc = could not find container \"ebd520ca00a03033c68890b1d6e7da6e2cdc537e6eff460e7746d5b2483118f6\": container with ID starting with ebd520ca00a03033c68890b1d6e7da6e2cdc537e6eff460e7746d5b2483118f6 not found: ID does not exist" Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.663906 4793 scope.go:117] "RemoveContainer" containerID="9c2b4c61ac078c21b7d6aa6e7368795a536fa56bbf2b448d5262a9eb21b31f07" Jan 27 22:23:22 crc kubenswrapper[4793]: E0127 22:23:22.669610 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c2b4c61ac078c21b7d6aa6e7368795a536fa56bbf2b448d5262a9eb21b31f07\": container with ID starting with 9c2b4c61ac078c21b7d6aa6e7368795a536fa56bbf2b448d5262a9eb21b31f07 not found: ID does not exist" containerID="9c2b4c61ac078c21b7d6aa6e7368795a536fa56bbf2b448d5262a9eb21b31f07" Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.669799 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c2b4c61ac078c21b7d6aa6e7368795a536fa56bbf2b448d5262a9eb21b31f07"} err="failed to get container status \"9c2b4c61ac078c21b7d6aa6e7368795a536fa56bbf2b448d5262a9eb21b31f07\": rpc error: code = NotFound desc = could not find container \"9c2b4c61ac078c21b7d6aa6e7368795a536fa56bbf2b448d5262a9eb21b31f07\": container with ID starting with 9c2b4c61ac078c21b7d6aa6e7368795a536fa56bbf2b448d5262a9eb21b31f07 not found: ID does not exist" Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.669895 4793 scope.go:117] "RemoveContainer" containerID="e56d5d2704a25839bfbab36549eb100d9b0d70bba17d11191edf890e9aa7b422" Jan 27 22:23:22 crc kubenswrapper[4793]: E0127 22:23:22.670532 4793 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"e56d5d2704a25839bfbab36549eb100d9b0d70bba17d11191edf890e9aa7b422\": container with ID starting with e56d5d2704a25839bfbab36549eb100d9b0d70bba17d11191edf890e9aa7b422 not found: ID does not exist" containerID="e56d5d2704a25839bfbab36549eb100d9b0d70bba17d11191edf890e9aa7b422" Jan 27 22:23:22 crc kubenswrapper[4793]: I0127 22:23:22.670621 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e56d5d2704a25839bfbab36549eb100d9b0d70bba17d11191edf890e9aa7b422"} err="failed to get container status \"e56d5d2704a25839bfbab36549eb100d9b0d70bba17d11191edf890e9aa7b422\": rpc error: code = NotFound desc = could not find container \"e56d5d2704a25839bfbab36549eb100d9b0d70bba17d11191edf890e9aa7b422\": container with ID starting with e56d5d2704a25839bfbab36549eb100d9b0d70bba17d11191edf890e9aa7b422 not found: ID does not exist" Jan 27 22:23:23 crc kubenswrapper[4793]: I0127 22:23:23.822316 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850" path="/var/lib/kubelet/pods/2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850/volumes" Jan 27 22:23:26 crc kubenswrapper[4793]: I0127 22:23:26.857741 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:23:26 crc kubenswrapper[4793]: E0127 22:23:26.858565 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:23:33 crc kubenswrapper[4793]: I0127 22:23:33.804276 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" Jan 27 22:23:33 crc kubenswrapper[4793]: E0127 22:23:33.805270 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:23:37 crc kubenswrapper[4793]: I0127 22:23:37.804744 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:23:37 crc kubenswrapper[4793]: E0127 22:23:37.805821 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:23:46 crc kubenswrapper[4793]: I0127 22:23:46.803910 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" Jan 27 22:23:46 crc kubenswrapper[4793]: E0127 22:23:46.804747 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:23:50 crc kubenswrapper[4793]: I0127 22:23:50.804675 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:23:50 crc kubenswrapper[4793]: E0127 22:23:50.805736 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:23:58 crc kubenswrapper[4793]: I0127 22:23:58.803807 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" Jan 27 22:23:59 crc kubenswrapper[4793]: I0127 22:23:59.934710 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"93cf59494bb19fc8050185e48fbf45869ee91bc1a663593ddebb8db2f7a2582b"} Jan 27 22:24:04 crc kubenswrapper[4793]: I0127 22:24:04.804297 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:24:04 crc kubenswrapper[4793]: E0127 22:24:04.805340 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:24:18 crc kubenswrapper[4793]: I0127 22:24:18.803270 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:24:18 crc kubenswrapper[4793]: E0127 22:24:18.804249 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:24:30 crc kubenswrapper[4793]: I0127 22:24:30.804184 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:24:30 crc kubenswrapper[4793]: E0127 22:24:30.805364 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:24:42 crc kubenswrapper[4793]: I0127 22:24:42.803872 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:24:42 crc kubenswrapper[4793]: E0127 22:24:42.804872 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier 
pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:24:54 crc kubenswrapper[4793]: I0127 22:24:54.804003 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:24:54 crc kubenswrapper[4793]: E0127 22:24:54.804838 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:25:07 crc kubenswrapper[4793]: I0127 22:25:07.804159 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:25:07 crc kubenswrapper[4793]: E0127 22:25:07.807117 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:25:21 crc kubenswrapper[4793]: I0127 22:25:21.804058 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:25:21 crc kubenswrapper[4793]: E0127 22:25:21.805233 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:25:34 crc kubenswrapper[4793]: I0127 22:25:34.804598 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:25:34 crc kubenswrapper[4793]: E0127 22:25:34.805676 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:25:49 crc kubenswrapper[4793]: I0127 22:25:49.804338 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:25:50 crc kubenswrapper[4793]: I0127 22:25:50.310907 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5"} Jan 27 22:25:53 crc kubenswrapper[4793]: I0127 22:25:53.243336 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 22:25:53 crc kubenswrapper[4793]: I0127 22:25:53.370963 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" exitCode=1 Jan 27 22:25:53 crc kubenswrapper[4793]: I0127 22:25:53.371015 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5"} Jan 27 22:25:53 crc kubenswrapper[4793]: I0127 22:25:53.371088 4793 scope.go:117] "RemoveContainer" containerID="ec141e959e4cf6bad1f8410c668a18b25ad24d78b5ba67ac56dfaf30eb33ecc9" Jan 27 22:25:53 crc kubenswrapper[4793]: I0127 22:25:53.372271 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:25:53 crc kubenswrapper[4793]: E0127 22:25:53.372907 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:25:58 crc kubenswrapper[4793]: I0127 22:25:58.242663 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:25:58 crc kubenswrapper[4793]: I0127 22:25:58.243264 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:25:58 crc kubenswrapper[4793]: I0127 22:25:58.243278 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:25:58 crc kubenswrapper[4793]: I0127 22:25:58.243924 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:25:58 crc kubenswrapper[4793]: E0127 22:25:58.244227 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:26:12 crc kubenswrapper[4793]: I0127 22:26:12.804363 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:26:12 crc kubenswrapper[4793]: E0127 22:26:12.806832 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:26:22 crc kubenswrapper[4793]: I0127 22:26:22.753239 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:26:22 crc kubenswrapper[4793]: I0127 22:26:22.753844 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:26:25 crc kubenswrapper[4793]: I0127 22:26:25.805134 4793 scope.go:117] "RemoveContainer" 
containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:26:25 crc kubenswrapper[4793]: E0127 22:26:25.806435 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:26:38 crc kubenswrapper[4793]: I0127 22:26:38.803900 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:26:38 crc kubenswrapper[4793]: E0127 22:26:38.805524 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:26:49 crc kubenswrapper[4793]: I0127 22:26:49.803596 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:26:49 crc kubenswrapper[4793]: E0127 22:26:49.804770 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:26:52 crc kubenswrapper[4793]: I0127 22:26:52.753232 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:26:52 crc kubenswrapper[4793]: I0127 22:26:52.754051 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:26:59 crc kubenswrapper[4793]: I0127 22:26:59.955700 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xspgr"] Jan 27 22:26:59 crc kubenswrapper[4793]: E0127 22:26:59.957242 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850" containerName="registry-server" Jan 27 22:26:59 crc kubenswrapper[4793]: I0127 22:26:59.957271 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850" containerName="registry-server" Jan 27 22:26:59 crc kubenswrapper[4793]: E0127 22:26:59.957305 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850" containerName="extract-utilities" Jan 27 22:26:59 crc kubenswrapper[4793]: I0127 22:26:59.957324 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850" containerName="extract-utilities" Jan 27 22:26:59 crc kubenswrapper[4793]: E0127 22:26:59.957359 4793 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850" containerName="extract-content" Jan 27 22:26:59 crc kubenswrapper[4793]: I0127 22:26:59.957377 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850" containerName="extract-content" Jan 27 22:26:59 crc kubenswrapper[4793]: I0127 22:26:59.957855 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e41af15-7dc2-4a9a-aaaa-9e7ed43b2850" containerName="registry-server" Jan 27 22:26:59 crc kubenswrapper[4793]: I0127 22:26:59.960972 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:26:59 crc kubenswrapper[4793]: I0127 22:26:59.969914 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xspgr"] Jan 27 22:27:00 crc kubenswrapper[4793]: I0127 22:27:00.039133 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd126c5f-fd82-4901-ae5c-7b42075c9a29-catalog-content\") pod \"community-operators-xspgr\" (UID: \"fd126c5f-fd82-4901-ae5c-7b42075c9a29\") " pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:27:00 crc kubenswrapper[4793]: I0127 22:27:00.039243 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gnmn\" (UniqueName: \"kubernetes.io/projected/fd126c5f-fd82-4901-ae5c-7b42075c9a29-kube-api-access-7gnmn\") pod \"community-operators-xspgr\" (UID: \"fd126c5f-fd82-4901-ae5c-7b42075c9a29\") " pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:27:00 crc kubenswrapper[4793]: I0127 22:27:00.039308 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd126c5f-fd82-4901-ae5c-7b42075c9a29-utilities\") pod \"community-operators-xspgr\" (UID: \"fd126c5f-fd82-4901-ae5c-7b42075c9a29\") " pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:27:00 crc kubenswrapper[4793]: I0127 22:27:00.140671 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd126c5f-fd82-4901-ae5c-7b42075c9a29-catalog-content\") pod \"community-operators-xspgr\" (UID: \"fd126c5f-fd82-4901-ae5c-7b42075c9a29\") " pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:27:00 crc kubenswrapper[4793]: I0127 22:27:00.140779 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gnmn\" (UniqueName: \"kubernetes.io/projected/fd126c5f-fd82-4901-ae5c-7b42075c9a29-kube-api-access-7gnmn\") pod \"community-operators-xspgr\" (UID: \"fd126c5f-fd82-4901-ae5c-7b42075c9a29\") " pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:27:00 crc kubenswrapper[4793]: I0127 22:27:00.140846 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd126c5f-fd82-4901-ae5c-7b42075c9a29-utilities\") pod \"community-operators-xspgr\" (UID: \"fd126c5f-fd82-4901-ae5c-7b42075c9a29\") " pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:27:00 crc kubenswrapper[4793]: I0127 22:27:00.141319 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd126c5f-fd82-4901-ae5c-7b42075c9a29-catalog-content\") pod 
\"community-operators-xspgr\" (UID: \"fd126c5f-fd82-4901-ae5c-7b42075c9a29\") " pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:27:00 crc kubenswrapper[4793]: I0127 22:27:00.142859 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd126c5f-fd82-4901-ae5c-7b42075c9a29-utilities\") pod \"community-operators-xspgr\" (UID: \"fd126c5f-fd82-4901-ae5c-7b42075c9a29\") " pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:27:00 crc kubenswrapper[4793]: I0127 22:27:00.165988 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gnmn\" (UniqueName: \"kubernetes.io/projected/fd126c5f-fd82-4901-ae5c-7b42075c9a29-kube-api-access-7gnmn\") pod \"community-operators-xspgr\" (UID: \"fd126c5f-fd82-4901-ae5c-7b42075c9a29\") " pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:27:00 crc kubenswrapper[4793]: I0127 22:27:00.288254 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:27:01 crc kubenswrapper[4793]: I0127 22:27:01.108384 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xspgr"] Jan 27 22:27:01 crc kubenswrapper[4793]: I0127 22:27:01.252927 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xspgr" event={"ID":"fd126c5f-fd82-4901-ae5c-7b42075c9a29","Type":"ContainerStarted","Data":"195c775cd20e1d5e2523f6c58018d77849fd8dec0b4430b7578f8788043ef08d"} Jan 27 22:27:02 crc kubenswrapper[4793]: I0127 22:27:02.268762 4793 generic.go:334] "Generic (PLEG): container finished" podID="fd126c5f-fd82-4901-ae5c-7b42075c9a29" containerID="11dbc4c1fda32477ddac49621ea608c9f39b2cbd0a6b33959eb2432b7416db5b" exitCode=0 Jan 27 22:27:02 crc kubenswrapper[4793]: I0127 22:27:02.268887 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xspgr" event={"ID":"fd126c5f-fd82-4901-ae5c-7b42075c9a29","Type":"ContainerDied","Data":"11dbc4c1fda32477ddac49621ea608c9f39b2cbd0a6b33959eb2432b7416db5b"} Jan 27 22:27:03 crc kubenswrapper[4793]: I0127 22:27:03.804357 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:27:03 crc kubenswrapper[4793]: E0127 22:27:03.805603 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:27:04 crc kubenswrapper[4793]: I0127 22:27:04.297057 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xspgr" event={"ID":"fd126c5f-fd82-4901-ae5c-7b42075c9a29","Type":"ContainerStarted","Data":"36c2d8bdf6965b4b6c5cc032e11bf92cbc1b8330f01e47cbb7931813c4a75ba5"} Jan 27 22:27:05 crc kubenswrapper[4793]: I0127 22:27:05.317523 4793 generic.go:334] "Generic (PLEG): container finished" podID="fd126c5f-fd82-4901-ae5c-7b42075c9a29" containerID="36c2d8bdf6965b4b6c5cc032e11bf92cbc1b8330f01e47cbb7931813c4a75ba5" exitCode=0 Jan 27 22:27:05 crc kubenswrapper[4793]: I0127 22:27:05.317694 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xspgr" 
event={"ID":"fd126c5f-fd82-4901-ae5c-7b42075c9a29","Type":"ContainerDied","Data":"36c2d8bdf6965b4b6c5cc032e11bf92cbc1b8330f01e47cbb7931813c4a75ba5"} Jan 27 22:27:06 crc kubenswrapper[4793]: I0127 22:27:06.371998 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xspgr" event={"ID":"fd126c5f-fd82-4901-ae5c-7b42075c9a29","Type":"ContainerStarted","Data":"1d50fac1606c0a514859015656d92be082ae690928aa58d92d450f898554e4d3"} Jan 27 22:27:06 crc kubenswrapper[4793]: I0127 22:27:06.401302 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xspgr" podStartSLOduration=3.95915742 podStartE2EDuration="7.401285719s" podCreationTimestamp="2026-01-27 22:26:59 +0000 UTC" firstStartedPulling="2026-01-27 22:27:02.271011166 +0000 UTC m=+8647.661264332" lastFinishedPulling="2026-01-27 22:27:05.713139435 +0000 UTC m=+8651.103392631" observedRunningTime="2026-01-27 22:27:06.39682013 +0000 UTC m=+8651.787073286" watchObservedRunningTime="2026-01-27 22:27:06.401285719 +0000 UTC m=+8651.791538875" Jan 27 22:27:10 crc kubenswrapper[4793]: I0127 22:27:10.288814 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:27:10 crc kubenswrapper[4793]: I0127 22:27:10.291255 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:27:11 crc kubenswrapper[4793]: I0127 22:27:11.359009 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-xspgr" podUID="fd126c5f-fd82-4901-ae5c-7b42075c9a29" containerName="registry-server" probeResult="failure" output=< Jan 27 22:27:11 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s Jan 27 22:27:11 crc kubenswrapper[4793]: > Jan 27 22:27:14 crc kubenswrapper[4793]: I0127 22:27:14.803488 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:27:14 crc kubenswrapper[4793]: E0127 22:27:14.804496 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:27:20 crc kubenswrapper[4793]: I0127 22:27:20.361262 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:27:20 crc kubenswrapper[4793]: I0127 22:27:20.459437 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:27:20 crc kubenswrapper[4793]: I0127 22:27:20.620314 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xspgr"] Jan 27 22:27:21 crc kubenswrapper[4793]: I0127 22:27:21.813914 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xspgr" podUID="fd126c5f-fd82-4901-ae5c-7b42075c9a29" containerName="registry-server" containerID="cri-o://1d50fac1606c0a514859015656d92be082ae690928aa58d92d450f898554e4d3" gracePeriod=2 Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.278751 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.317616 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7gnmn\" (UniqueName: \"kubernetes.io/projected/fd126c5f-fd82-4901-ae5c-7b42075c9a29-kube-api-access-7gnmn\") pod \"fd126c5f-fd82-4901-ae5c-7b42075c9a29\" (UID: \"fd126c5f-fd82-4901-ae5c-7b42075c9a29\") " Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.317990 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd126c5f-fd82-4901-ae5c-7b42075c9a29-utilities\") pod \"fd126c5f-fd82-4901-ae5c-7b42075c9a29\" (UID: \"fd126c5f-fd82-4901-ae5c-7b42075c9a29\") " Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.318031 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd126c5f-fd82-4901-ae5c-7b42075c9a29-catalog-content\") pod \"fd126c5f-fd82-4901-ae5c-7b42075c9a29\" (UID: \"fd126c5f-fd82-4901-ae5c-7b42075c9a29\") " Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.319790 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd126c5f-fd82-4901-ae5c-7b42075c9a29-utilities" (OuterVolumeSpecName: "utilities") pod "fd126c5f-fd82-4901-ae5c-7b42075c9a29" (UID: "fd126c5f-fd82-4901-ae5c-7b42075c9a29"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.327396 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd126c5f-fd82-4901-ae5c-7b42075c9a29-kube-api-access-7gnmn" (OuterVolumeSpecName: "kube-api-access-7gnmn") pod "fd126c5f-fd82-4901-ae5c-7b42075c9a29" (UID: "fd126c5f-fd82-4901-ae5c-7b42075c9a29"). InnerVolumeSpecName "kube-api-access-7gnmn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.381169 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd126c5f-fd82-4901-ae5c-7b42075c9a29-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fd126c5f-fd82-4901-ae5c-7b42075c9a29" (UID: "fd126c5f-fd82-4901-ae5c-7b42075c9a29"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.419506 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd126c5f-fd82-4901-ae5c-7b42075c9a29-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.419539 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd126c5f-fd82-4901-ae5c-7b42075c9a29-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.419564 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7gnmn\" (UniqueName: \"kubernetes.io/projected/fd126c5f-fd82-4901-ae5c-7b42075c9a29-kube-api-access-7gnmn\") on node \"crc\" DevicePath \"\"" Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.753239 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.753331 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.753453 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.755062 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"93cf59494bb19fc8050185e48fbf45869ee91bc1a663593ddebb8db2f7a2582b"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.755186 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://93cf59494bb19fc8050185e48fbf45869ee91bc1a663593ddebb8db2f7a2582b" gracePeriod=600 Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.825176 4793 generic.go:334] "Generic (PLEG): container finished" podID="fd126c5f-fd82-4901-ae5c-7b42075c9a29" containerID="1d50fac1606c0a514859015656d92be082ae690928aa58d92d450f898554e4d3" exitCode=0 Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.825221 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xspgr" event={"ID":"fd126c5f-fd82-4901-ae5c-7b42075c9a29","Type":"ContainerDied","Data":"1d50fac1606c0a514859015656d92be082ae690928aa58d92d450f898554e4d3"} Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.825253 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xspgr" event={"ID":"fd126c5f-fd82-4901-ae5c-7b42075c9a29","Type":"ContainerDied","Data":"195c775cd20e1d5e2523f6c58018d77849fd8dec0b4430b7578f8788043ef08d"} Jan 27 
22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.825274 4793 scope.go:117] "RemoveContainer" containerID="1d50fac1606c0a514859015656d92be082ae690928aa58d92d450f898554e4d3" Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.825310 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xspgr" Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.856955 4793 scope.go:117] "RemoveContainer" containerID="36c2d8bdf6965b4b6c5cc032e11bf92cbc1b8330f01e47cbb7931813c4a75ba5" Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.889066 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xspgr"] Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.902661 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xspgr"] Jan 27 22:27:22 crc kubenswrapper[4793]: I0127 22:27:22.943802 4793 scope.go:117] "RemoveContainer" containerID="11dbc4c1fda32477ddac49621ea608c9f39b2cbd0a6b33959eb2432b7416db5b" Jan 27 22:27:23 crc kubenswrapper[4793]: I0127 22:27:23.033016 4793 scope.go:117] "RemoveContainer" containerID="1d50fac1606c0a514859015656d92be082ae690928aa58d92d450f898554e4d3" Jan 27 22:27:23 crc kubenswrapper[4793]: E0127 22:27:23.033624 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d50fac1606c0a514859015656d92be082ae690928aa58d92d450f898554e4d3\": container with ID starting with 1d50fac1606c0a514859015656d92be082ae690928aa58d92d450f898554e4d3 not found: ID does not exist" containerID="1d50fac1606c0a514859015656d92be082ae690928aa58d92d450f898554e4d3" Jan 27 22:27:23 crc kubenswrapper[4793]: I0127 22:27:23.033659 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d50fac1606c0a514859015656d92be082ae690928aa58d92d450f898554e4d3"} err="failed to get container status \"1d50fac1606c0a514859015656d92be082ae690928aa58d92d450f898554e4d3\": rpc error: code = NotFound desc = could not find container \"1d50fac1606c0a514859015656d92be082ae690928aa58d92d450f898554e4d3\": container with ID starting with 1d50fac1606c0a514859015656d92be082ae690928aa58d92d450f898554e4d3 not found: ID does not exist" Jan 27 22:27:23 crc kubenswrapper[4793]: I0127 22:27:23.033700 4793 scope.go:117] "RemoveContainer" containerID="36c2d8bdf6965b4b6c5cc032e11bf92cbc1b8330f01e47cbb7931813c4a75ba5" Jan 27 22:27:23 crc kubenswrapper[4793]: E0127 22:27:23.034648 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36c2d8bdf6965b4b6c5cc032e11bf92cbc1b8330f01e47cbb7931813c4a75ba5\": container with ID starting with 36c2d8bdf6965b4b6c5cc032e11bf92cbc1b8330f01e47cbb7931813c4a75ba5 not found: ID does not exist" containerID="36c2d8bdf6965b4b6c5cc032e11bf92cbc1b8330f01e47cbb7931813c4a75ba5" Jan 27 22:27:23 crc kubenswrapper[4793]: I0127 22:27:23.034701 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36c2d8bdf6965b4b6c5cc032e11bf92cbc1b8330f01e47cbb7931813c4a75ba5"} err="failed to get container status \"36c2d8bdf6965b4b6c5cc032e11bf92cbc1b8330f01e47cbb7931813c4a75ba5\": rpc error: code = NotFound desc = could not find container \"36c2d8bdf6965b4b6c5cc032e11bf92cbc1b8330f01e47cbb7931813c4a75ba5\": container with ID starting with 36c2d8bdf6965b4b6c5cc032e11bf92cbc1b8330f01e47cbb7931813c4a75ba5 not found: ID does not exist" Jan 
27 22:27:23 crc kubenswrapper[4793]: I0127 22:27:23.034732 4793 scope.go:117] "RemoveContainer" containerID="11dbc4c1fda32477ddac49621ea608c9f39b2cbd0a6b33959eb2432b7416db5b" Jan 27 22:27:23 crc kubenswrapper[4793]: E0127 22:27:23.035166 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11dbc4c1fda32477ddac49621ea608c9f39b2cbd0a6b33959eb2432b7416db5b\": container with ID starting with 11dbc4c1fda32477ddac49621ea608c9f39b2cbd0a6b33959eb2432b7416db5b not found: ID does not exist" containerID="11dbc4c1fda32477ddac49621ea608c9f39b2cbd0a6b33959eb2432b7416db5b" Jan 27 22:27:23 crc kubenswrapper[4793]: I0127 22:27:23.035230 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11dbc4c1fda32477ddac49621ea608c9f39b2cbd0a6b33959eb2432b7416db5b"} err="failed to get container status \"11dbc4c1fda32477ddac49621ea608c9f39b2cbd0a6b33959eb2432b7416db5b\": rpc error: code = NotFound desc = could not find container \"11dbc4c1fda32477ddac49621ea608c9f39b2cbd0a6b33959eb2432b7416db5b\": container with ID starting with 11dbc4c1fda32477ddac49621ea608c9f39b2cbd0a6b33959eb2432b7416db5b not found: ID does not exist" Jan 27 22:27:23 crc kubenswrapper[4793]: I0127 22:27:23.814149 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd126c5f-fd82-4901-ae5c-7b42075c9a29" path="/var/lib/kubelet/pods/fd126c5f-fd82-4901-ae5c-7b42075c9a29/volumes" Jan 27 22:27:23 crc kubenswrapper[4793]: I0127 22:27:23.838475 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="93cf59494bb19fc8050185e48fbf45869ee91bc1a663593ddebb8db2f7a2582b" exitCode=0 Jan 27 22:27:23 crc kubenswrapper[4793]: I0127 22:27:23.838512 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"93cf59494bb19fc8050185e48fbf45869ee91bc1a663593ddebb8db2f7a2582b"} Jan 27 22:27:23 crc kubenswrapper[4793]: I0127 22:27:23.838535 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a"} Jan 27 22:27:23 crc kubenswrapper[4793]: I0127 22:27:23.838566 4793 scope.go:117] "RemoveContainer" containerID="2f05cd0ef441fe31f8c37cb2a23d864917864a665c9ae0e95c3afb8cafbfe120" Jan 27 22:27:29 crc kubenswrapper[4793]: I0127 22:27:29.804257 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:27:29 crc kubenswrapper[4793]: E0127 22:27:29.805130 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:27:43 crc kubenswrapper[4793]: I0127 22:27:43.803827 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:27:43 crc kubenswrapper[4793]: E0127 22:27:43.804752 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:27:54 crc kubenswrapper[4793]: I0127 22:27:54.804936 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:27:54 crc kubenswrapper[4793]: E0127 22:27:54.809182 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:28:07 crc kubenswrapper[4793]: I0127 22:28:07.803711 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:28:07 crc kubenswrapper[4793]: E0127 22:28:07.804748 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:28:19 crc kubenswrapper[4793]: I0127 22:28:19.885240 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:28:19 crc kubenswrapper[4793]: E0127 22:28:19.900209 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.205899 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qd4pt"] Jan 27 22:28:21 crc kubenswrapper[4793]: E0127 22:28:21.206739 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd126c5f-fd82-4901-ae5c-7b42075c9a29" containerName="extract-utilities" Jan 27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.206755 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd126c5f-fd82-4901-ae5c-7b42075c9a29" containerName="extract-utilities" Jan 27 22:28:21 crc kubenswrapper[4793]: E0127 22:28:21.206797 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd126c5f-fd82-4901-ae5c-7b42075c9a29" containerName="registry-server" Jan 27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.206803 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd126c5f-fd82-4901-ae5c-7b42075c9a29" containerName="registry-server" Jan 27 22:28:21 crc kubenswrapper[4793]: E0127 22:28:21.206820 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd126c5f-fd82-4901-ae5c-7b42075c9a29" containerName="extract-content" Jan 27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.206827 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd126c5f-fd82-4901-ae5c-7b42075c9a29" containerName="extract-content" Jan 27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.207043 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd126c5f-fd82-4901-ae5c-7b42075c9a29" 
containerName="registry-server" Jan 27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.208601 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qd4pt" Jan 27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.229488 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qd4pt"] Jan 27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.328983 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d923196-b3fe-4f48-91da-19a0e0429d84-utilities\") pod \"redhat-operators-qd4pt\" (UID: \"8d923196-b3fe-4f48-91da-19a0e0429d84\") " pod="openshift-marketplace/redhat-operators-qd4pt" Jan 27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.329072 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxb7d\" (UniqueName: \"kubernetes.io/projected/8d923196-b3fe-4f48-91da-19a0e0429d84-kube-api-access-qxb7d\") pod \"redhat-operators-qd4pt\" (UID: \"8d923196-b3fe-4f48-91da-19a0e0429d84\") " pod="openshift-marketplace/redhat-operators-qd4pt" Jan 27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.330033 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d923196-b3fe-4f48-91da-19a0e0429d84-catalog-content\") pod \"redhat-operators-qd4pt\" (UID: \"8d923196-b3fe-4f48-91da-19a0e0429d84\") " pod="openshift-marketplace/redhat-operators-qd4pt" Jan 27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.432249 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d923196-b3fe-4f48-91da-19a0e0429d84-catalog-content\") pod \"redhat-operators-qd4pt\" (UID: \"8d923196-b3fe-4f48-91da-19a0e0429d84\") " pod="openshift-marketplace/redhat-operators-qd4pt" Jan 27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.432303 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d923196-b3fe-4f48-91da-19a0e0429d84-utilities\") pod \"redhat-operators-qd4pt\" (UID: \"8d923196-b3fe-4f48-91da-19a0e0429d84\") " pod="openshift-marketplace/redhat-operators-qd4pt" Jan 27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.432341 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxb7d\" (UniqueName: \"kubernetes.io/projected/8d923196-b3fe-4f48-91da-19a0e0429d84-kube-api-access-qxb7d\") pod \"redhat-operators-qd4pt\" (UID: \"8d923196-b3fe-4f48-91da-19a0e0429d84\") " pod="openshift-marketplace/redhat-operators-qd4pt" Jan 27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.432888 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d923196-b3fe-4f48-91da-19a0e0429d84-utilities\") pod \"redhat-operators-qd4pt\" (UID: \"8d923196-b3fe-4f48-91da-19a0e0429d84\") " pod="openshift-marketplace/redhat-operators-qd4pt" Jan 27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.433073 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d923196-b3fe-4f48-91da-19a0e0429d84-catalog-content\") pod \"redhat-operators-qd4pt\" (UID: \"8d923196-b3fe-4f48-91da-19a0e0429d84\") " pod="openshift-marketplace/redhat-operators-qd4pt" Jan 
27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.456240 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxb7d\" (UniqueName: \"kubernetes.io/projected/8d923196-b3fe-4f48-91da-19a0e0429d84-kube-api-access-qxb7d\") pod \"redhat-operators-qd4pt\" (UID: \"8d923196-b3fe-4f48-91da-19a0e0429d84\") " pod="openshift-marketplace/redhat-operators-qd4pt" Jan 27 22:28:21 crc kubenswrapper[4793]: I0127 22:28:21.568367 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qd4pt" Jan 27 22:28:22 crc kubenswrapper[4793]: I0127 22:28:22.122127 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qd4pt"] Jan 27 22:28:23 crc kubenswrapper[4793]: I0127 22:28:23.099507 4793 generic.go:334] "Generic (PLEG): container finished" podID="8d923196-b3fe-4f48-91da-19a0e0429d84" containerID="b056a57470c993ff60c69ed378888502d0b741ffe9c06767a511aaf2de544703" exitCode=0 Jan 27 22:28:23 crc kubenswrapper[4793]: I0127 22:28:23.099659 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qd4pt" event={"ID":"8d923196-b3fe-4f48-91da-19a0e0429d84","Type":"ContainerDied","Data":"b056a57470c993ff60c69ed378888502d0b741ffe9c06767a511aaf2de544703"} Jan 27 22:28:23 crc kubenswrapper[4793]: I0127 22:28:23.099881 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qd4pt" event={"ID":"8d923196-b3fe-4f48-91da-19a0e0429d84","Type":"ContainerStarted","Data":"9a916d8f5a208d17679e3ef67788bac4b7d156a2620c1f5e0c04b5b1e94725ad"} Jan 27 22:28:23 crc kubenswrapper[4793]: I0127 22:28:23.102744 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 22:28:25 crc kubenswrapper[4793]: I0127 22:28:25.128315 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qd4pt" event={"ID":"8d923196-b3fe-4f48-91da-19a0e0429d84","Type":"ContainerStarted","Data":"04617383e11cc7218c3c38e6b86f6ea720d6710f714789a24637bab5f0ca162b"} Jan 27 22:28:30 crc kubenswrapper[4793]: I0127 22:28:30.193413 4793 generic.go:334] "Generic (PLEG): container finished" podID="8d923196-b3fe-4f48-91da-19a0e0429d84" containerID="04617383e11cc7218c3c38e6b86f6ea720d6710f714789a24637bab5f0ca162b" exitCode=0 Jan 27 22:28:30 crc kubenswrapper[4793]: I0127 22:28:30.193482 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qd4pt" event={"ID":"8d923196-b3fe-4f48-91da-19a0e0429d84","Type":"ContainerDied","Data":"04617383e11cc7218c3c38e6b86f6ea720d6710f714789a24637bab5f0ca162b"} Jan 27 22:28:30 crc kubenswrapper[4793]: I0127 22:28:30.802720 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:28:30 crc kubenswrapper[4793]: E0127 22:28:30.803333 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:28:31 crc kubenswrapper[4793]: I0127 22:28:31.205242 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qd4pt" 
event={"ID":"8d923196-b3fe-4f48-91da-19a0e0429d84","Type":"ContainerStarted","Data":"a1e9bb8593c929890cee3a31cf130910847372bf5e34b1ce3ebeab0168f2f7af"} Jan 27 22:28:31 crc kubenswrapper[4793]: I0127 22:28:31.234444 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qd4pt" podStartSLOduration=2.698369039 podStartE2EDuration="10.234420017s" podCreationTimestamp="2026-01-27 22:28:21 +0000 UTC" firstStartedPulling="2026-01-27 22:28:23.102165313 +0000 UTC m=+8728.492418499" lastFinishedPulling="2026-01-27 22:28:30.638216311 +0000 UTC m=+8736.028469477" observedRunningTime="2026-01-27 22:28:31.226371463 +0000 UTC m=+8736.616624649" watchObservedRunningTime="2026-01-27 22:28:31.234420017 +0000 UTC m=+8736.624673173" Jan 27 22:28:31 crc kubenswrapper[4793]: I0127 22:28:31.568686 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qd4pt" Jan 27 22:28:31 crc kubenswrapper[4793]: I0127 22:28:31.568776 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qd4pt" Jan 27 22:28:32 crc kubenswrapper[4793]: I0127 22:28:32.648829 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qd4pt" podUID="8d923196-b3fe-4f48-91da-19a0e0429d84" containerName="registry-server" probeResult="failure" output=< Jan 27 22:28:32 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s Jan 27 22:28:32 crc kubenswrapper[4793]: > Jan 27 22:28:41 crc kubenswrapper[4793]: I0127 22:28:41.635264 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qd4pt" Jan 27 22:28:41 crc kubenswrapper[4793]: I0127 22:28:41.689942 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qd4pt" Jan 27 22:28:41 crc kubenswrapper[4793]: I0127 22:28:41.878811 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qd4pt"] Jan 27 22:28:43 crc kubenswrapper[4793]: I0127 22:28:43.350041 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qd4pt" podUID="8d923196-b3fe-4f48-91da-19a0e0429d84" containerName="registry-server" containerID="cri-o://a1e9bb8593c929890cee3a31cf130910847372bf5e34b1ce3ebeab0168f2f7af" gracePeriod=2 Jan 27 22:28:43 crc kubenswrapper[4793]: I0127 22:28:43.831601 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qd4pt" Jan 27 22:28:43 crc kubenswrapper[4793]: I0127 22:28:43.934289 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d923196-b3fe-4f48-91da-19a0e0429d84-utilities\") pod \"8d923196-b3fe-4f48-91da-19a0e0429d84\" (UID: \"8d923196-b3fe-4f48-91da-19a0e0429d84\") " Jan 27 22:28:43 crc kubenswrapper[4793]: I0127 22:28:43.934479 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d923196-b3fe-4f48-91da-19a0e0429d84-catalog-content\") pod \"8d923196-b3fe-4f48-91da-19a0e0429d84\" (UID: \"8d923196-b3fe-4f48-91da-19a0e0429d84\") " Jan 27 22:28:43 crc kubenswrapper[4793]: I0127 22:28:43.934632 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxb7d\" (UniqueName: \"kubernetes.io/projected/8d923196-b3fe-4f48-91da-19a0e0429d84-kube-api-access-qxb7d\") pod \"8d923196-b3fe-4f48-91da-19a0e0429d84\" (UID: \"8d923196-b3fe-4f48-91da-19a0e0429d84\") " Jan 27 22:28:43 crc kubenswrapper[4793]: I0127 22:28:43.935844 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d923196-b3fe-4f48-91da-19a0e0429d84-utilities" (OuterVolumeSpecName: "utilities") pod "8d923196-b3fe-4f48-91da-19a0e0429d84" (UID: "8d923196-b3fe-4f48-91da-19a0e0429d84"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:28:43 crc kubenswrapper[4793]: I0127 22:28:43.942534 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d923196-b3fe-4f48-91da-19a0e0429d84-kube-api-access-qxb7d" (OuterVolumeSpecName: "kube-api-access-qxb7d") pod "8d923196-b3fe-4f48-91da-19a0e0429d84" (UID: "8d923196-b3fe-4f48-91da-19a0e0429d84"). InnerVolumeSpecName "kube-api-access-qxb7d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.037141 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxb7d\" (UniqueName: \"kubernetes.io/projected/8d923196-b3fe-4f48-91da-19a0e0429d84-kube-api-access-qxb7d\") on node \"crc\" DevicePath \"\"" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.037186 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d923196-b3fe-4f48-91da-19a0e0429d84-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.099674 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d923196-b3fe-4f48-91da-19a0e0429d84-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8d923196-b3fe-4f48-91da-19a0e0429d84" (UID: "8d923196-b3fe-4f48-91da-19a0e0429d84"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.139380 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d923196-b3fe-4f48-91da-19a0e0429d84-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.362088 4793 generic.go:334] "Generic (PLEG): container finished" podID="8d923196-b3fe-4f48-91da-19a0e0429d84" containerID="a1e9bb8593c929890cee3a31cf130910847372bf5e34b1ce3ebeab0168f2f7af" exitCode=0 Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.362157 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qd4pt" event={"ID":"8d923196-b3fe-4f48-91da-19a0e0429d84","Type":"ContainerDied","Data":"a1e9bb8593c929890cee3a31cf130910847372bf5e34b1ce3ebeab0168f2f7af"} Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.362194 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qd4pt" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.362208 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qd4pt" event={"ID":"8d923196-b3fe-4f48-91da-19a0e0429d84","Type":"ContainerDied","Data":"9a916d8f5a208d17679e3ef67788bac4b7d156a2620c1f5e0c04b5b1e94725ad"} Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.362244 4793 scope.go:117] "RemoveContainer" containerID="a1e9bb8593c929890cee3a31cf130910847372bf5e34b1ce3ebeab0168f2f7af" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.414218 4793 scope.go:117] "RemoveContainer" containerID="04617383e11cc7218c3c38e6b86f6ea720d6710f714789a24637bab5f0ca162b" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.431318 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qd4pt"] Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.446636 4793 scope.go:117] "RemoveContainer" containerID="b056a57470c993ff60c69ed378888502d0b741ffe9c06767a511aaf2de544703" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.448777 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qd4pt"] Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.504943 4793 scope.go:117] "RemoveContainer" containerID="a1e9bb8593c929890cee3a31cf130910847372bf5e34b1ce3ebeab0168f2f7af" Jan 27 22:28:44 crc kubenswrapper[4793]: E0127 22:28:44.507159 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1e9bb8593c929890cee3a31cf130910847372bf5e34b1ce3ebeab0168f2f7af\": container with ID starting with a1e9bb8593c929890cee3a31cf130910847372bf5e34b1ce3ebeab0168f2f7af not found: ID does not exist" containerID="a1e9bb8593c929890cee3a31cf130910847372bf5e34b1ce3ebeab0168f2f7af" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.507229 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1e9bb8593c929890cee3a31cf130910847372bf5e34b1ce3ebeab0168f2f7af"} err="failed to get container status \"a1e9bb8593c929890cee3a31cf130910847372bf5e34b1ce3ebeab0168f2f7af\": rpc error: code = NotFound desc = could not find container \"a1e9bb8593c929890cee3a31cf130910847372bf5e34b1ce3ebeab0168f2f7af\": container with ID starting with a1e9bb8593c929890cee3a31cf130910847372bf5e34b1ce3ebeab0168f2f7af not found: ID does not exist" Jan 27 22:28:44 crc 
kubenswrapper[4793]: I0127 22:28:44.507268 4793 scope.go:117] "RemoveContainer" containerID="04617383e11cc7218c3c38e6b86f6ea720d6710f714789a24637bab5f0ca162b" Jan 27 22:28:44 crc kubenswrapper[4793]: E0127 22:28:44.508026 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04617383e11cc7218c3c38e6b86f6ea720d6710f714789a24637bab5f0ca162b\": container with ID starting with 04617383e11cc7218c3c38e6b86f6ea720d6710f714789a24637bab5f0ca162b not found: ID does not exist" containerID="04617383e11cc7218c3c38e6b86f6ea720d6710f714789a24637bab5f0ca162b" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.508065 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04617383e11cc7218c3c38e6b86f6ea720d6710f714789a24637bab5f0ca162b"} err="failed to get container status \"04617383e11cc7218c3c38e6b86f6ea720d6710f714789a24637bab5f0ca162b\": rpc error: code = NotFound desc = could not find container \"04617383e11cc7218c3c38e6b86f6ea720d6710f714789a24637bab5f0ca162b\": container with ID starting with 04617383e11cc7218c3c38e6b86f6ea720d6710f714789a24637bab5f0ca162b not found: ID does not exist" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.508096 4793 scope.go:117] "RemoveContainer" containerID="b056a57470c993ff60c69ed378888502d0b741ffe9c06767a511aaf2de544703" Jan 27 22:28:44 crc kubenswrapper[4793]: E0127 22:28:44.508438 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b056a57470c993ff60c69ed378888502d0b741ffe9c06767a511aaf2de544703\": container with ID starting with b056a57470c993ff60c69ed378888502d0b741ffe9c06767a511aaf2de544703 not found: ID does not exist" containerID="b056a57470c993ff60c69ed378888502d0b741ffe9c06767a511aaf2de544703" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.508484 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b056a57470c993ff60c69ed378888502d0b741ffe9c06767a511aaf2de544703"} err="failed to get container status \"b056a57470c993ff60c69ed378888502d0b741ffe9c06767a511aaf2de544703\": rpc error: code = NotFound desc = could not find container \"b056a57470c993ff60c69ed378888502d0b741ffe9c06767a511aaf2de544703\": container with ID starting with b056a57470c993ff60c69ed378888502d0b741ffe9c06767a511aaf2de544703 not found: ID does not exist" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.684970 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qdbz5"] Jan 27 22:28:44 crc kubenswrapper[4793]: E0127 22:28:44.685696 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d923196-b3fe-4f48-91da-19a0e0429d84" containerName="extract-utilities" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.685776 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d923196-b3fe-4f48-91da-19a0e0429d84" containerName="extract-utilities" Jan 27 22:28:44 crc kubenswrapper[4793]: E0127 22:28:44.685853 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d923196-b3fe-4f48-91da-19a0e0429d84" containerName="registry-server" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.685908 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d923196-b3fe-4f48-91da-19a0e0429d84" containerName="registry-server" Jan 27 22:28:44 crc kubenswrapper[4793]: E0127 22:28:44.685971 4793 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="8d923196-b3fe-4f48-91da-19a0e0429d84" containerName="extract-content" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.686049 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d923196-b3fe-4f48-91da-19a0e0429d84" containerName="extract-content" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.686313 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d923196-b3fe-4f48-91da-19a0e0429d84" containerName="registry-server" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.687797 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.729838 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qdbz5"] Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.864656 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62c10347-798c-44f2-9361-71775635409e-utilities\") pod \"certified-operators-qdbz5\" (UID: \"62c10347-798c-44f2-9361-71775635409e\") " pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.864711 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62c10347-798c-44f2-9361-71775635409e-catalog-content\") pod \"certified-operators-qdbz5\" (UID: \"62c10347-798c-44f2-9361-71775635409e\") " pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.864930 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x57qm\" (UniqueName: \"kubernetes.io/projected/62c10347-798c-44f2-9361-71775635409e-kube-api-access-x57qm\") pod \"certified-operators-qdbz5\" (UID: \"62c10347-798c-44f2-9361-71775635409e\") " pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.967215 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x57qm\" (UniqueName: \"kubernetes.io/projected/62c10347-798c-44f2-9361-71775635409e-kube-api-access-x57qm\") pod \"certified-operators-qdbz5\" (UID: \"62c10347-798c-44f2-9361-71775635409e\") " pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.967465 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62c10347-798c-44f2-9361-71775635409e-utilities\") pod \"certified-operators-qdbz5\" (UID: \"62c10347-798c-44f2-9361-71775635409e\") " pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.967502 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62c10347-798c-44f2-9361-71775635409e-catalog-content\") pod \"certified-operators-qdbz5\" (UID: \"62c10347-798c-44f2-9361-71775635409e\") " pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.968173 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62c10347-798c-44f2-9361-71775635409e-catalog-content\") pod 
\"certified-operators-qdbz5\" (UID: \"62c10347-798c-44f2-9361-71775635409e\") " pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.968408 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62c10347-798c-44f2-9361-71775635409e-utilities\") pod \"certified-operators-qdbz5\" (UID: \"62c10347-798c-44f2-9361-71775635409e\") " pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:44 crc kubenswrapper[4793]: I0127 22:28:44.995452 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x57qm\" (UniqueName: \"kubernetes.io/projected/62c10347-798c-44f2-9361-71775635409e-kube-api-access-x57qm\") pod \"certified-operators-qdbz5\" (UID: \"62c10347-798c-44f2-9361-71775635409e\") " pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:45 crc kubenswrapper[4793]: I0127 22:28:45.024261 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:45 crc kubenswrapper[4793]: I0127 22:28:45.585730 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qdbz5"] Jan 27 22:28:45 crc kubenswrapper[4793]: I0127 22:28:45.806964 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:28:45 crc kubenswrapper[4793]: E0127 22:28:45.807810 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:28:45 crc kubenswrapper[4793]: I0127 22:28:45.824619 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d923196-b3fe-4f48-91da-19a0e0429d84" path="/var/lib/kubelet/pods/8d923196-b3fe-4f48-91da-19a0e0429d84/volumes" Jan 27 22:28:46 crc kubenswrapper[4793]: I0127 22:28:46.384820 4793 generic.go:334] "Generic (PLEG): container finished" podID="62c10347-798c-44f2-9361-71775635409e" containerID="bb3521949e80704428a0164e8e4b3096d87a4abc42fb4e653e1f6d26d70fd7d6" exitCode=0 Jan 27 22:28:46 crc kubenswrapper[4793]: I0127 22:28:46.384899 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qdbz5" event={"ID":"62c10347-798c-44f2-9361-71775635409e","Type":"ContainerDied","Data":"bb3521949e80704428a0164e8e4b3096d87a4abc42fb4e653e1f6d26d70fd7d6"} Jan 27 22:28:46 crc kubenswrapper[4793]: I0127 22:28:46.384968 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qdbz5" event={"ID":"62c10347-798c-44f2-9361-71775635409e","Type":"ContainerStarted","Data":"41e0237dcaf9d03f32d59ed97f175885f400e19bb609198ef3cf6ef98107ef21"} Jan 27 22:28:48 crc kubenswrapper[4793]: I0127 22:28:48.407768 4793 generic.go:334] "Generic (PLEG): container finished" podID="62c10347-798c-44f2-9361-71775635409e" containerID="5aa52734fe6f441b31b2cafea6b15923932a5136f1bc95bf23ce8904601d3536" exitCode=0 Jan 27 22:28:48 crc kubenswrapper[4793]: I0127 22:28:48.408022 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qdbz5" 
event={"ID":"62c10347-798c-44f2-9361-71775635409e","Type":"ContainerDied","Data":"5aa52734fe6f441b31b2cafea6b15923932a5136f1bc95bf23ce8904601d3536"} Jan 27 22:28:49 crc kubenswrapper[4793]: I0127 22:28:49.422326 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qdbz5" event={"ID":"62c10347-798c-44f2-9361-71775635409e","Type":"ContainerStarted","Data":"2633d555d9820d32d0c2f27ed377ed89a214ad530ac1d5e883481e9aafddcecf"} Jan 27 22:28:49 crc kubenswrapper[4793]: I0127 22:28:49.453874 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qdbz5" podStartSLOduration=2.780787997 podStartE2EDuration="5.453833922s" podCreationTimestamp="2026-01-27 22:28:44 +0000 UTC" firstStartedPulling="2026-01-27 22:28:46.387697768 +0000 UTC m=+8751.777950934" lastFinishedPulling="2026-01-27 22:28:49.060743693 +0000 UTC m=+8754.450996859" observedRunningTime="2026-01-27 22:28:49.446671929 +0000 UTC m=+8754.836925095" watchObservedRunningTime="2026-01-27 22:28:49.453833922 +0000 UTC m=+8754.844087178" Jan 27 22:28:55 crc kubenswrapper[4793]: I0127 22:28:55.025690 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:55 crc kubenswrapper[4793]: I0127 22:28:55.026158 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:55 crc kubenswrapper[4793]: I0127 22:28:55.088279 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:55 crc kubenswrapper[4793]: I0127 22:28:55.567967 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:55 crc kubenswrapper[4793]: I0127 22:28:55.636404 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qdbz5"] Jan 27 22:28:57 crc kubenswrapper[4793]: I0127 22:28:57.515931 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qdbz5" podUID="62c10347-798c-44f2-9361-71775635409e" containerName="registry-server" containerID="cri-o://2633d555d9820d32d0c2f27ed377ed89a214ad530ac1d5e883481e9aafddcecf" gracePeriod=2 Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.223908 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.383056 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x57qm\" (UniqueName: \"kubernetes.io/projected/62c10347-798c-44f2-9361-71775635409e-kube-api-access-x57qm\") pod \"62c10347-798c-44f2-9361-71775635409e\" (UID: \"62c10347-798c-44f2-9361-71775635409e\") " Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.383299 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62c10347-798c-44f2-9361-71775635409e-utilities\") pod \"62c10347-798c-44f2-9361-71775635409e\" (UID: \"62c10347-798c-44f2-9361-71775635409e\") " Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.383384 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62c10347-798c-44f2-9361-71775635409e-catalog-content\") pod \"62c10347-798c-44f2-9361-71775635409e\" (UID: \"62c10347-798c-44f2-9361-71775635409e\") " Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.385534 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62c10347-798c-44f2-9361-71775635409e-utilities" (OuterVolumeSpecName: "utilities") pod "62c10347-798c-44f2-9361-71775635409e" (UID: "62c10347-798c-44f2-9361-71775635409e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.395082 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62c10347-798c-44f2-9361-71775635409e-kube-api-access-x57qm" (OuterVolumeSpecName: "kube-api-access-x57qm") pod "62c10347-798c-44f2-9361-71775635409e" (UID: "62c10347-798c-44f2-9361-71775635409e"). InnerVolumeSpecName "kube-api-access-x57qm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.439563 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62c10347-798c-44f2-9361-71775635409e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "62c10347-798c-44f2-9361-71775635409e" (UID: "62c10347-798c-44f2-9361-71775635409e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.486342 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x57qm\" (UniqueName: \"kubernetes.io/projected/62c10347-798c-44f2-9361-71775635409e-kube-api-access-x57qm\") on node \"crc\" DevicePath \"\"" Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.486379 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62c10347-798c-44f2-9361-71775635409e-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.486394 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62c10347-798c-44f2-9361-71775635409e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.532657 4793 generic.go:334] "Generic (PLEG): container finished" podID="62c10347-798c-44f2-9361-71775635409e" containerID="2633d555d9820d32d0c2f27ed377ed89a214ad530ac1d5e883481e9aafddcecf" exitCode=0 Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.532707 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qdbz5" event={"ID":"62c10347-798c-44f2-9361-71775635409e","Type":"ContainerDied","Data":"2633d555d9820d32d0c2f27ed377ed89a214ad530ac1d5e883481e9aafddcecf"} Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.532734 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qdbz5" event={"ID":"62c10347-798c-44f2-9361-71775635409e","Type":"ContainerDied","Data":"41e0237dcaf9d03f32d59ed97f175885f400e19bb609198ef3cf6ef98107ef21"} Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.532752 4793 scope.go:117] "RemoveContainer" containerID="2633d555d9820d32d0c2f27ed377ed89a214ad530ac1d5e883481e9aafddcecf" Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.532896 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qdbz5" Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.569417 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qdbz5"] Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.578047 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qdbz5"] Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.580435 4793 scope.go:117] "RemoveContainer" containerID="5aa52734fe6f441b31b2cafea6b15923932a5136f1bc95bf23ce8904601d3536" Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.612208 4793 scope.go:117] "RemoveContainer" containerID="bb3521949e80704428a0164e8e4b3096d87a4abc42fb4e653e1f6d26d70fd7d6" Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.686784 4793 scope.go:117] "RemoveContainer" containerID="2633d555d9820d32d0c2f27ed377ed89a214ad530ac1d5e883481e9aafddcecf" Jan 27 22:28:58 crc kubenswrapper[4793]: E0127 22:28:58.687306 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2633d555d9820d32d0c2f27ed377ed89a214ad530ac1d5e883481e9aafddcecf\": container with ID starting with 2633d555d9820d32d0c2f27ed377ed89a214ad530ac1d5e883481e9aafddcecf not found: ID does not exist" containerID="2633d555d9820d32d0c2f27ed377ed89a214ad530ac1d5e883481e9aafddcecf" Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.687342 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2633d555d9820d32d0c2f27ed377ed89a214ad530ac1d5e883481e9aafddcecf"} err="failed to get container status \"2633d555d9820d32d0c2f27ed377ed89a214ad530ac1d5e883481e9aafddcecf\": rpc error: code = NotFound desc = could not find container \"2633d555d9820d32d0c2f27ed377ed89a214ad530ac1d5e883481e9aafddcecf\": container with ID starting with 2633d555d9820d32d0c2f27ed377ed89a214ad530ac1d5e883481e9aafddcecf not found: ID does not exist" Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.687363 4793 scope.go:117] "RemoveContainer" containerID="5aa52734fe6f441b31b2cafea6b15923932a5136f1bc95bf23ce8904601d3536" Jan 27 22:28:58 crc kubenswrapper[4793]: E0127 22:28:58.687725 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5aa52734fe6f441b31b2cafea6b15923932a5136f1bc95bf23ce8904601d3536\": container with ID starting with 5aa52734fe6f441b31b2cafea6b15923932a5136f1bc95bf23ce8904601d3536 not found: ID does not exist" containerID="5aa52734fe6f441b31b2cafea6b15923932a5136f1bc95bf23ce8904601d3536" Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.687747 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5aa52734fe6f441b31b2cafea6b15923932a5136f1bc95bf23ce8904601d3536"} err="failed to get container status \"5aa52734fe6f441b31b2cafea6b15923932a5136f1bc95bf23ce8904601d3536\": rpc error: code = NotFound desc = could not find container \"5aa52734fe6f441b31b2cafea6b15923932a5136f1bc95bf23ce8904601d3536\": container with ID starting with 5aa52734fe6f441b31b2cafea6b15923932a5136f1bc95bf23ce8904601d3536 not found: ID does not exist" Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.687759 4793 scope.go:117] "RemoveContainer" containerID="bb3521949e80704428a0164e8e4b3096d87a4abc42fb4e653e1f6d26d70fd7d6" Jan 27 22:28:58 crc kubenswrapper[4793]: E0127 22:28:58.688013 4793 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"bb3521949e80704428a0164e8e4b3096d87a4abc42fb4e653e1f6d26d70fd7d6\": container with ID starting with bb3521949e80704428a0164e8e4b3096d87a4abc42fb4e653e1f6d26d70fd7d6 not found: ID does not exist" containerID="bb3521949e80704428a0164e8e4b3096d87a4abc42fb4e653e1f6d26d70fd7d6"
Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.688038 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb3521949e80704428a0164e8e4b3096d87a4abc42fb4e653e1f6d26d70fd7d6"} err="failed to get container status \"bb3521949e80704428a0164e8e4b3096d87a4abc42fb4e653e1f6d26d70fd7d6\": rpc error: code = NotFound desc = could not find container \"bb3521949e80704428a0164e8e4b3096d87a4abc42fb4e653e1f6d26d70fd7d6\": container with ID starting with bb3521949e80704428a0164e8e4b3096d87a4abc42fb4e653e1f6d26d70fd7d6 not found: ID does not exist"
Jan 27 22:28:58 crc kubenswrapper[4793]: I0127 22:28:58.803841 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5"
Jan 27 22:28:58 crc kubenswrapper[4793]: E0127 22:28:58.804213 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:28:59 crc kubenswrapper[4793]: I0127 22:28:59.816593 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62c10347-798c-44f2-9361-71775635409e" path="/var/lib/kubelet/pods/62c10347-798c-44f2-9361-71775635409e/volumes"
Jan 27 22:29:13 crc kubenswrapper[4793]: I0127 22:29:13.804535 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5"
Jan 27 22:29:13 crc kubenswrapper[4793]: E0127 22:29:13.806084 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:29:24 crc kubenswrapper[4793]: I0127 22:29:24.804132 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5"
Jan 27 22:29:24 crc kubenswrapper[4793]: E0127 22:29:24.805379 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:29:36 crc kubenswrapper[4793]: I0127 22:29:36.803596 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5"
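
The ContainerStatus/DeleteContainer exchanges at 22:28:58 above show the kubelet treating a gRPC NotFound from the runtime as a benign outcome: the container it wanted to remove is already gone, so cleanup proceeds. A minimal Go sketch of that idempotent-delete pattern; the runtimeClient interface here is a hypothetical stand-in for a CRI-style client, not the kubelet's actual types:

package crihelpers

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// runtimeClient is a hypothetical, minimal slice of a CRI-style client.
type runtimeClient interface {
	ContainerStatus(ctx context.Context, id string) error
	RemoveContainer(ctx context.Context, id string) error
}

// removeIfPresent deletes a container but treats a gRPC NotFound as
// success: a container that no longer exists is already in the state
// deletion was meant to produce, so the error is logged and swallowed.
func removeIfPresent(ctx context.Context, rt runtimeClient, id string) error {
	if err := rt.ContainerStatus(ctx, id); err != nil {
		if status.Code(err) == codes.NotFound {
			fmt.Printf("container %s already gone; treating delete as done\n", id)
			return nil
		}
		return fmt.Errorf("failed to get container status %q: %w", id, err)
	}
	return rt.RemoveContainer(ctx, id)
}
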
Jan 27 22:29:36 crc kubenswrapper[4793]: E0127 22:29:36.804724 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:29:49 crc kubenswrapper[4793]: I0127 22:29:49.804257 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5"
Jan 27 22:29:49 crc kubenswrapper[4793]: E0127 22:29:49.805494 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:29:52 crc kubenswrapper[4793]: I0127 22:29:52.753288 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 22:29:52 crc kubenswrapper[4793]: I0127 22:29:52.753826 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.189868 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9"]
Jan 27 22:30:00 crc kubenswrapper[4793]: E0127 22:30:00.190965 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62c10347-798c-44f2-9361-71775635409e" containerName="registry-server"
Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.190982 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="62c10347-798c-44f2-9361-71775635409e" containerName="registry-server"
Jan 27 22:30:00 crc kubenswrapper[4793]: E0127 22:30:00.191010 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62c10347-798c-44f2-9361-71775635409e" containerName="extract-utilities"
Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.191019 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="62c10347-798c-44f2-9361-71775635409e" containerName="extract-utilities"
Jan 27 22:30:00 crc kubenswrapper[4793]: E0127 22:30:00.191034 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62c10347-798c-44f2-9361-71775635409e" containerName="extract-content"
Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.191043 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="62c10347-798c-44f2-9361-71775635409e" containerName="extract-content"
Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.191305 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="62c10347-798c-44f2-9361-71775635409e" containerName="registry-server"
Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.192295 4793 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9" Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.194816 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.195029 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.201956 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9"] Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.340423 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-secret-volume\") pod \"collect-profiles-29492550-5hxt9\" (UID: \"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9" Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.340463 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-config-volume\") pod \"collect-profiles-29492550-5hxt9\" (UID: \"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9" Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.340866 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rg7f9\" (UniqueName: \"kubernetes.io/projected/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-kube-api-access-rg7f9\") pod \"collect-profiles-29492550-5hxt9\" (UID: \"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9" Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.443792 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-secret-volume\") pod \"collect-profiles-29492550-5hxt9\" (UID: \"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9" Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.443869 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-config-volume\") pod \"collect-profiles-29492550-5hxt9\" (UID: \"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9" Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.444115 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rg7f9\" (UniqueName: \"kubernetes.io/projected/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-kube-api-access-rg7f9\") pod \"collect-profiles-29492550-5hxt9\" (UID: \"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9" Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.444911 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-config-volume\") pod 
\"collect-profiles-29492550-5hxt9\" (UID: \"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9"
Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.450695 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-secret-volume\") pod \"collect-profiles-29492550-5hxt9\" (UID: \"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9"
Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.470890 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rg7f9\" (UniqueName: \"kubernetes.io/projected/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-kube-api-access-rg7f9\") pod \"collect-profiles-29492550-5hxt9\" (UID: \"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9"
Jan 27 22:30:00 crc kubenswrapper[4793]: I0127 22:30:00.521208 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9"
Jan 27 22:30:01 crc kubenswrapper[4793]: I0127 22:30:01.038008 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9"]
Jan 27 22:30:01 crc kubenswrapper[4793]: W0127 22:30:01.040237 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcd5bea73_d0f3_422b_9ba9_c37fe2fc5f69.slice/crio-b3fab2c46526507a92e410e1839768c47503b911a8f6945f0b035a2d7928edda WatchSource:0}: Error finding container b3fab2c46526507a92e410e1839768c47503b911a8f6945f0b035a2d7928edda: Status 404 returned error can't find the container with id b3fab2c46526507a92e410e1839768c47503b911a8f6945f0b035a2d7928edda
Jan 27 22:30:01 crc kubenswrapper[4793]: I0127 22:30:01.413772 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9" event={"ID":"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69","Type":"ContainerStarted","Data":"4f657e16faff1ad8a03894649d4905ff8485acd3a9ba45249b446c1b1d438651"}
Jan 27 22:30:01 crc kubenswrapper[4793]: I0127 22:30:01.414069 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9" event={"ID":"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69","Type":"ContainerStarted","Data":"b3fab2c46526507a92e410e1839768c47503b911a8f6945f0b035a2d7928edda"}
Jan 27 22:30:01 crc kubenswrapper[4793]: I0127 22:30:01.456158 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9" podStartSLOduration=1.456135228 podStartE2EDuration="1.456135228s" podCreationTimestamp="2026-01-27 22:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 22:30:01.448819711 +0000 UTC m=+8826.839072907" watchObservedRunningTime="2026-01-27 22:30:01.456135228 +0000 UTC m=+8826.846388404"
Jan 27 22:30:02 crc kubenswrapper[4793]: I0127 22:30:02.428153 4793 generic.go:334] "Generic (PLEG): container finished" podID="cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69" containerID="4f657e16faff1ad8a03894649d4905ff8485acd3a9ba45249b446c1b1d438651" exitCode=0
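
The name collect-profiles-29492550-5hxt9 above follows the Kubernetes CronJob controller's convention of suffixing each spawned Job with its scheduled run time expressed in minutes since the Unix epoch; decoding 29492550 lands exactly on the 22:30:00 schedule tick these entries record. A quick Go check of that decoding:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Numeric suffix taken from collect-profiles-29492550-5hxt9 above.
	const suffix = 29492550
	// The suffix counts minutes since the Unix epoch; convert to seconds.
	scheduled := time.Unix(suffix*60, 0).UTC()
	fmt.Println(scheduled) // 2026-01-27 22:30:00 +0000 UTC
}
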
Jan 27 22:30:02 crc kubenswrapper[4793]: I0127 22:30:02.428410 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9" event={"ID":"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69","Type":"ContainerDied","Data":"4f657e16faff1ad8a03894649d4905ff8485acd3a9ba45249b446c1b1d438651"}
Jan 27 22:30:03 crc kubenswrapper[4793]: I0127 22:30:03.866226 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9"
Jan 27 22:30:04 crc kubenswrapper[4793]: I0127 22:30:04.025362 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rg7f9\" (UniqueName: \"kubernetes.io/projected/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-kube-api-access-rg7f9\") pod \"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69\" (UID: \"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69\") "
Jan 27 22:30:04 crc kubenswrapper[4793]: I0127 22:30:04.025595 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-config-volume\") pod \"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69\" (UID: \"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69\") "
Jan 27 22:30:04 crc kubenswrapper[4793]: I0127 22:30:04.025694 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-secret-volume\") pod \"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69\" (UID: \"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69\") "
Jan 27 22:30:04 crc kubenswrapper[4793]: I0127 22:30:04.026247 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-config-volume" (OuterVolumeSpecName: "config-volume") pod "cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69" (UID: "cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 22:30:04 crc kubenswrapper[4793]: I0127 22:30:04.032880 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69" (UID: "cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 22:30:04 crc kubenswrapper[4793]: I0127 22:30:04.032974 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-kube-api-access-rg7f9" (OuterVolumeSpecName: "kube-api-access-rg7f9") pod "cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69" (UID: "cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69"). InnerVolumeSpecName "kube-api-access-rg7f9".
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:30:04 crc kubenswrapper[4793]: I0127 22:30:04.127866 4793 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 22:30:04 crc kubenswrapper[4793]: I0127 22:30:04.127896 4793 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 22:30:04 crc kubenswrapper[4793]: I0127 22:30:04.127907 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rg7f9\" (UniqueName: \"kubernetes.io/projected/cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69-kube-api-access-rg7f9\") on node \"crc\" DevicePath \"\"" Jan 27 22:30:04 crc kubenswrapper[4793]: I0127 22:30:04.449093 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9" event={"ID":"cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69","Type":"ContainerDied","Data":"b3fab2c46526507a92e410e1839768c47503b911a8f6945f0b035a2d7928edda"} Jan 27 22:30:04 crc kubenswrapper[4793]: I0127 22:30:04.449134 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3fab2c46526507a92e410e1839768c47503b911a8f6945f0b035a2d7928edda" Jan 27 22:30:04 crc kubenswrapper[4793]: I0127 22:30:04.449135 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492550-5hxt9" Jan 27 22:30:04 crc kubenswrapper[4793]: I0127 22:30:04.803915 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:30:04 crc kubenswrapper[4793]: E0127 22:30:04.804626 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:30:04 crc kubenswrapper[4793]: I0127 22:30:04.952731 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr"] Jan 27 22:30:04 crc kubenswrapper[4793]: I0127 22:30:04.964382 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492505-vcqkr"] Jan 27 22:30:05 crc kubenswrapper[4793]: I0127 22:30:05.835195 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f539b49d-e419-4610-bfd9-d622db4abf43" path="/var/lib/kubelet/pods/f539b49d-e419-4610-bfd9-d622db4abf43/volumes" Jan 27 22:30:17 crc kubenswrapper[4793]: I0127 22:30:17.804169 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:30:17 crc kubenswrapper[4793]: E0127 22:30:17.805631 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:30:22 crc kubenswrapper[4793]: I0127 22:30:22.753531 4793 
patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:30:22 crc kubenswrapper[4793]: I0127 22:30:22.754158 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:30:30 crc kubenswrapper[4793]: I0127 22:30:30.803524 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:30:30 crc kubenswrapper[4793]: E0127 22:30:30.804434 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:30:39 crc kubenswrapper[4793]: I0127 22:30:39.601688 4793 scope.go:117] "RemoveContainer" containerID="8f501427ee7cec7f3124e941a432211fc69546dd9d8c59befada6cff62bd4225" Jan 27 22:30:44 crc kubenswrapper[4793]: I0127 22:30:44.803671 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5" Jan 27 22:30:44 crc kubenswrapper[4793]: E0127 22:30:44.804507 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:30:52 crc kubenswrapper[4793]: I0127 22:30:52.753183 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:30:52 crc kubenswrapper[4793]: I0127 22:30:52.753690 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:30:52 crc kubenswrapper[4793]: I0127 22:30:52.753745 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 22:30:52 crc kubenswrapper[4793]: I0127 22:30:52.754714 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 22:30:52 crc kubenswrapper[4793]: I0127 22:30:52.754776 4793 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" gracePeriod=600
Jan 27 22:30:52 crc kubenswrapper[4793]: E0127 22:30:52.896725 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:30:53 crc kubenswrapper[4793]: I0127 22:30:53.086646 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" exitCode=0
Jan 27 22:30:53 crc kubenswrapper[4793]: I0127 22:30:53.086759 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a"}
Jan 27 22:30:53 crc kubenswrapper[4793]: I0127 22:30:53.087121 4793 scope.go:117] "RemoveContainer" containerID="93cf59494bb19fc8050185e48fbf45869ee91bc1a663593ddebb8db2f7a2582b"
Jan 27 22:30:53 crc kubenswrapper[4793]: I0127 22:30:53.088124 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a"
Jan 27 22:30:53 crc kubenswrapper[4793]: E0127 22:30:53.088736 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:30:57 crc kubenswrapper[4793]: I0127 22:30:57.804372 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5"
Jan 27 22:30:59 crc kubenswrapper[4793]: I0127 22:30:59.166363 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de"}
Jan 27 22:31:01 crc kubenswrapper[4793]: I0127 22:31:01.193229 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" exitCode=1
Jan 27 22:31:01 crc kubenswrapper[4793]: I0127 22:31:01.193333 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de"}
Jan 27 22:31:01 crc kubenswrapper[4793]: I0127 22:31:01.195951 4793 scope.go:117] "RemoveContainer" containerID="c113885b771d64161f60512f88ee65651f189e837e8fe22f5fd2a3852f8f85e5"
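
The probe failures at 22:29:52 and the kill at 22:30:52 above follow the kubelet's HTTP liveness semantics: GET the configured endpoint, count a 2xx/3xx response as success, and count transport errors such as "connection refused" as failures; once the failure threshold is crossed, the container is killed with the pod's grace period (600s here) and restarted under backoff. A minimal Go sketch of a single probe attempt against the endpoint the prober logs; the 1s timeout is an assumption, not a value taken from this log:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeOnce performs one HTTP liveness-style check: any 2xx/3xx status
// counts as healthy; a transport error (e.g. "dial tcp 127.0.0.1:8798:
// connect: connection refused") counts as a failed probe.
func probeOnce(url string) (healthy bool, detail string) {
	client := &http.Client{Timeout: 1 * time.Second} // assumed probe timeout
	resp, err := client.Get(url)
	if err != nil {
		return false, err.Error()
	}
	defer resp.Body.Close()
	return resp.StatusCode >= 200 && resp.StatusCode < 400, resp.Status
}

func main() {
	ok, detail := probeOnce("http://127.0.0.1:8798/health")
	fmt.Printf("healthy=%v (%s)\n", ok, detail)
}
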
Jan 27 22:31:01 crc kubenswrapper[4793]: I0127 22:31:01.197227 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de"
Jan 27 22:31:01 crc kubenswrapper[4793]: E0127 22:31:01.197698 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:31:03 crc kubenswrapper[4793]: I0127 22:31:03.243205 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 27 22:31:03 crc kubenswrapper[4793]: I0127 22:31:03.244382 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de"
Jan 27 22:31:03 crc kubenswrapper[4793]: E0127 22:31:03.244711 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:31:07 crc kubenswrapper[4793]: I0127 22:31:07.804800 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a"
Jan 27 22:31:07 crc kubenswrapper[4793]: E0127 22:31:07.805947 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:31:08 crc kubenswrapper[4793]: I0127 22:31:08.242810 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 22:31:08 crc kubenswrapper[4793]: I0127 22:31:08.243059 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 22:31:08 crc kubenswrapper[4793]: I0127 22:31:08.243119 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 22:31:08 crc kubenswrapper[4793]: I0127 22:31:08.243718 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de"
Jan 27 22:31:08 crc kubenswrapper[4793]: E0127 22:31:08.244054 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:31:08 crc kubenswrapper[4793]: I0127 22:31:08.292291 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de"
Jan 27 22:31:08 crc kubenswrapper[4793]: E0127 22:31:08.292793 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:31:19 crc kubenswrapper[4793]: I0127 22:31:19.817641 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de"
Jan 27 22:31:19 crc kubenswrapper[4793]: E0127 22:31:19.818631 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:31:22 crc kubenswrapper[4793]: I0127 22:31:22.803337 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a"
Jan 27 22:31:22 crc kubenswrapper[4793]: E0127 22:31:22.804003 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:31:34 crc kubenswrapper[4793]: I0127 22:31:34.804514 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de"
Jan 27 22:31:34 crc kubenswrapper[4793]: E0127 22:31:34.805515 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:31:37 crc kubenswrapper[4793]: I0127 22:31:37.803833 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a"
Jan 27 22:31:37 crc kubenswrapper[4793]: E0127 22:31:37.804912 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:31:47 crc kubenswrapper[4793]: I0127 22:31:47.804464 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de"
Jan 27 22:31:47 crc kubenswrapper[4793]: E0127 22:31:47.805534 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:31:48 crc kubenswrapper[4793]: I0127 22:31:48.802818 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a"
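
Every restart attempt in this stretch is rejected with the same "back-off 5m0s" message: once a container crash-loops, the kubelet delays each restart on an exponential schedule that saturates at the 5m cap it reports, which is why the identical error repeats for minutes on end. A Go sketch of that schedule; the 10s initial delay and doubling factor are the usual kubelet defaults and are assumptions here, since the cap is the only value this log actually states:

package main

import (
	"fmt"
	"time"
)

// backoff returns the restart delay after the given number of crashes:
// exponential doubling from an initial delay, saturating at a hard cap.
func backoff(restarts int) time.Duration {
	const (
		initial  = 10 * time.Second // assumed default, not from this log
		maxDelay = 5 * time.Minute  // the "back-off 5m0s" cap in the log
	)
	d := initial
	for i := 0; i < restarts; i++ {
		d *= 2
		if d >= maxDelay {
			return maxDelay
		}
	}
	return d
}

func main() {
	// Crashes 0..5 give 10s, 20s, 40s, 1m20s, 2m40s, 5m0s; the delay
	// then stays pinned at 5m0s, matching the repeated log entries.
	for r := 0; r <= 6; r++ {
		fmt.Printf("restart %d -> wait %v\n", r, backoff(r))
	}
}
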
Jan 27 22:31:48 crc kubenswrapper[4793]: E0127 22:31:48.803266 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:31:59 crc kubenswrapper[4793]: I0127 22:31:59.803442 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de"
Jan 27 22:31:59 crc kubenswrapper[4793]: E0127 22:31:59.804093 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:32:02 crc kubenswrapper[4793]: I0127 22:32:02.803773 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a"
Jan 27 22:32:02 crc kubenswrapper[4793]: E0127 22:32:02.804428 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:32:12 crc kubenswrapper[4793]: I0127 22:32:12.810351 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de"
Jan 27 22:32:12 crc kubenswrapper[4793]: E0127 22:32:12.814922 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:32:15 crc kubenswrapper[4793]: I0127 22:32:15.814196 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a"
Jan 27 22:32:15 crc kubenswrapper[4793]: E0127 22:32:15.815012 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:32:25 crc kubenswrapper[4793]: I0127 22:32:25.826918 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de"
Jan 27 22:32:25 crc kubenswrapper[4793]: E0127 22:32:25.829218 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:32:26 crc kubenswrapper[4793]: I0127 22:32:26.804958 4793 scope.go:117]
"RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:32:26 crc kubenswrapper[4793]: E0127 22:32:26.805675 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:32:36 crc kubenswrapper[4793]: I0127 22:32:36.803483 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:32:36 crc kubenswrapper[4793]: E0127 22:32:36.804159 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:32:37 crc kubenswrapper[4793]: I0127 22:32:37.804273 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:32:37 crc kubenswrapper[4793]: E0127 22:32:37.804791 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:32:48 crc kubenswrapper[4793]: I0127 22:32:48.803738 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:32:48 crc kubenswrapper[4793]: E0127 22:32:48.804614 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:32:52 crc kubenswrapper[4793]: I0127 22:32:52.804158 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:32:52 crc kubenswrapper[4793]: E0127 22:32:52.805386 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:33:01 crc kubenswrapper[4793]: I0127 22:33:01.803591 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:33:01 crc kubenswrapper[4793]: E0127 22:33:01.804479 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:33:07 crc kubenswrapper[4793]: I0127 22:33:07.802984 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:33:07 crc kubenswrapper[4793]: E0127 22:33:07.803829 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:33:14 crc kubenswrapper[4793]: I0127 22:33:14.805318 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:33:14 crc kubenswrapper[4793]: E0127 22:33:14.806372 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:33:21 crc kubenswrapper[4793]: I0127 22:33:21.745074 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dpn2t"] Jan 27 22:33:21 crc kubenswrapper[4793]: E0127 22:33:21.746296 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69" containerName="collect-profiles" Jan 27 22:33:21 crc kubenswrapper[4793]: I0127 22:33:21.746315 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69" containerName="collect-profiles" Jan 27 22:33:21 crc kubenswrapper[4793]: I0127 22:33:21.746726 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd5bea73-d0f3-422b-9ba9-c37fe2fc5f69" containerName="collect-profiles" Jan 27 22:33:21 crc kubenswrapper[4793]: I0127 22:33:21.748606 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dpn2t" Jan 27 22:33:21 crc kubenswrapper[4793]: I0127 22:33:21.765149 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dpn2t"] Jan 27 22:33:21 crc kubenswrapper[4793]: I0127 22:33:21.827036 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97f970fb-1266-48a5-8b46-ef0d0cfb295b-utilities\") pod \"redhat-marketplace-dpn2t\" (UID: \"97f970fb-1266-48a5-8b46-ef0d0cfb295b\") " pod="openshift-marketplace/redhat-marketplace-dpn2t" Jan 27 22:33:21 crc kubenswrapper[4793]: I0127 22:33:21.827123 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97f970fb-1266-48a5-8b46-ef0d0cfb295b-catalog-content\") pod \"redhat-marketplace-dpn2t\" (UID: \"97f970fb-1266-48a5-8b46-ef0d0cfb295b\") " pod="openshift-marketplace/redhat-marketplace-dpn2t" Jan 27 22:33:21 crc kubenswrapper[4793]: I0127 22:33:21.827227 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br8gx\" (UniqueName: \"kubernetes.io/projected/97f970fb-1266-48a5-8b46-ef0d0cfb295b-kube-api-access-br8gx\") pod \"redhat-marketplace-dpn2t\" (UID: \"97f970fb-1266-48a5-8b46-ef0d0cfb295b\") " pod="openshift-marketplace/redhat-marketplace-dpn2t" Jan 27 22:33:21 crc kubenswrapper[4793]: I0127 22:33:21.929754 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97f970fb-1266-48a5-8b46-ef0d0cfb295b-utilities\") pod \"redhat-marketplace-dpn2t\" (UID: \"97f970fb-1266-48a5-8b46-ef0d0cfb295b\") " pod="openshift-marketplace/redhat-marketplace-dpn2t" Jan 27 22:33:21 crc kubenswrapper[4793]: I0127 22:33:21.929849 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97f970fb-1266-48a5-8b46-ef0d0cfb295b-catalog-content\") pod \"redhat-marketplace-dpn2t\" (UID: \"97f970fb-1266-48a5-8b46-ef0d0cfb295b\") " pod="openshift-marketplace/redhat-marketplace-dpn2t" Jan 27 22:33:21 crc kubenswrapper[4793]: I0127 22:33:21.929965 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br8gx\" (UniqueName: \"kubernetes.io/projected/97f970fb-1266-48a5-8b46-ef0d0cfb295b-kube-api-access-br8gx\") pod \"redhat-marketplace-dpn2t\" (UID: \"97f970fb-1266-48a5-8b46-ef0d0cfb295b\") " pod="openshift-marketplace/redhat-marketplace-dpn2t" Jan 27 22:33:21 crc kubenswrapper[4793]: I0127 22:33:21.930393 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97f970fb-1266-48a5-8b46-ef0d0cfb295b-catalog-content\") pod \"redhat-marketplace-dpn2t\" (UID: \"97f970fb-1266-48a5-8b46-ef0d0cfb295b\") " pod="openshift-marketplace/redhat-marketplace-dpn2t" Jan 27 22:33:21 crc kubenswrapper[4793]: I0127 22:33:21.930670 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97f970fb-1266-48a5-8b46-ef0d0cfb295b-utilities\") pod \"redhat-marketplace-dpn2t\" (UID: \"97f970fb-1266-48a5-8b46-ef0d0cfb295b\") " pod="openshift-marketplace/redhat-marketplace-dpn2t" Jan 27 22:33:21 crc kubenswrapper[4793]: I0127 22:33:21.959614 4793 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-br8gx\" (UniqueName: \"kubernetes.io/projected/97f970fb-1266-48a5-8b46-ef0d0cfb295b-kube-api-access-br8gx\") pod \"redhat-marketplace-dpn2t\" (UID: \"97f970fb-1266-48a5-8b46-ef0d0cfb295b\") " pod="openshift-marketplace/redhat-marketplace-dpn2t" Jan 27 22:33:22 crc kubenswrapper[4793]: I0127 22:33:22.092832 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dpn2t" Jan 27 22:33:22 crc kubenswrapper[4793]: I0127 22:33:22.579350 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dpn2t"] Jan 27 22:33:22 crc kubenswrapper[4793]: I0127 22:33:22.719218 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dpn2t" event={"ID":"97f970fb-1266-48a5-8b46-ef0d0cfb295b","Type":"ContainerStarted","Data":"9944cf5ced1ca3984ef786273bb5077b159ba48779a4eb7067e7dfc073f94e49"} Jan 27 22:33:22 crc kubenswrapper[4793]: I0127 22:33:22.806639 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:33:22 crc kubenswrapper[4793]: E0127 22:33:22.807259 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:33:23 crc kubenswrapper[4793]: I0127 22:33:23.950931 4793 generic.go:334] "Generic (PLEG): container finished" podID="97f970fb-1266-48a5-8b46-ef0d0cfb295b" containerID="04dfabbcbbd2646dd18200f44468e343d0f8960dc9c3b8a7fc2245d2df489849" exitCode=0 Jan 27 22:33:23 crc kubenswrapper[4793]: I0127 22:33:23.952234 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dpn2t" event={"ID":"97f970fb-1266-48a5-8b46-ef0d0cfb295b","Type":"ContainerDied","Data":"04dfabbcbbd2646dd18200f44468e343d0f8960dc9c3b8a7fc2245d2df489849"} Jan 27 22:33:23 crc kubenswrapper[4793]: I0127 22:33:23.953410 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 22:33:25 crc kubenswrapper[4793]: I0127 22:33:25.991085 4793 generic.go:334] "Generic (PLEG): container finished" podID="97f970fb-1266-48a5-8b46-ef0d0cfb295b" containerID="68c415a0a5e31c98cf49f1772e28cd03fcf2e1c354386e9dbc96573e928da670" exitCode=0 Jan 27 22:33:25 crc kubenswrapper[4793]: I0127 22:33:25.991225 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dpn2t" event={"ID":"97f970fb-1266-48a5-8b46-ef0d0cfb295b","Type":"ContainerDied","Data":"68c415a0a5e31c98cf49f1772e28cd03fcf2e1c354386e9dbc96573e928da670"} Jan 27 22:33:26 crc kubenswrapper[4793]: I0127 22:33:26.803939 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:33:26 crc kubenswrapper[4793]: E0127 22:33:26.804483 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 
27 22:33:27 crc kubenswrapper[4793]: I0127 22:33:27.005911 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dpn2t" event={"ID":"97f970fb-1266-48a5-8b46-ef0d0cfb295b","Type":"ContainerStarted","Data":"3f66862b17c2a077189c61e5e013e32edee25d54a22915444783a3b98f4fde17"}
Jan 27 22:33:27 crc kubenswrapper[4793]: I0127 22:33:27.034071 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dpn2t" podStartSLOduration=3.490073169 podStartE2EDuration="6.034051782s" podCreationTimestamp="2026-01-27 22:33:21 +0000 UTC" firstStartedPulling="2026-01-27 22:33:23.953180312 +0000 UTC m=+9029.343433468" lastFinishedPulling="2026-01-27 22:33:26.497158935 +0000 UTC m=+9031.887412081" observedRunningTime="2026-01-27 22:33:27.029088531 +0000 UTC m=+9032.419341707" watchObservedRunningTime="2026-01-27 22:33:27.034051782 +0000 UTC m=+9032.424304938"
Jan 27 22:33:32 crc kubenswrapper[4793]: I0127 22:33:32.093255 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dpn2t"
Jan 27 22:33:32 crc kubenswrapper[4793]: I0127 22:33:32.093859 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dpn2t"
Jan 27 22:33:32 crc kubenswrapper[4793]: I0127 22:33:32.153765 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dpn2t"
Jan 27 22:33:33 crc kubenswrapper[4793]: I0127 22:33:33.126533 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dpn2t"
Jan 27 22:33:33 crc kubenswrapper[4793]: I0127 22:33:33.184797 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dpn2t"]
Jan 27 22:33:35 crc kubenswrapper[4793]: I0127 22:33:35.091742 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dpn2t" podUID="97f970fb-1266-48a5-8b46-ef0d0cfb295b" containerName="registry-server" containerID="cri-o://3f66862b17c2a077189c61e5e013e32edee25d54a22915444783a3b98f4fde17" gracePeriod=2
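
The pod_startup_latency_tracker entry above reports two figures: podStartE2EDuration spans pod creation to the observed running state, while podStartSLOduration additionally excludes the image-pull window (firstStartedPulling to lastFinishedPulling). Re-deriving both in Go from the timestamps in that entry; the tracker's own SLO figure differs in the last digits because it subtracts the monotonic readings (the m=+ values) rather than the wall-clock ones:

package main

import (
	"fmt"
	"time"
)

// mustParse parses the timestamp format used in the log entry above.
func mustParse(s string) time.Time {
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	// Values copied from the redhat-marketplace-dpn2t tracker entry.
	created := mustParse("2026-01-27 22:33:21 +0000 UTC")
	running := mustParse("2026-01-27 22:33:27.034051782 +0000 UTC")
	pullStart := mustParse("2026-01-27 22:33:23.953180312 +0000 UTC")
	pullEnd := mustParse("2026-01-27 22:33:26.497158935 +0000 UTC")

	e2e := running.Sub(created)          // creation -> running
	slo := e2e - pullEnd.Sub(pullStart)  // minus the image-pull window
	fmt.Println(e2e, slo)                // ~6.034051782s and ~3.490073s
}
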
Jan 27 22:33:35 crc kubenswrapper[4793]: I0127 22:33:35.672768 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dpn2t"
Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.102351 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-br8gx\" (UniqueName: \"kubernetes.io/projected/97f970fb-1266-48a5-8b46-ef0d0cfb295b-kube-api-access-br8gx\") pod \"97f970fb-1266-48a5-8b46-ef0d0cfb295b\" (UID: \"97f970fb-1266-48a5-8b46-ef0d0cfb295b\") "
Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.102451 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97f970fb-1266-48a5-8b46-ef0d0cfb295b-catalog-content\") pod \"97f970fb-1266-48a5-8b46-ef0d0cfb295b\" (UID: \"97f970fb-1266-48a5-8b46-ef0d0cfb295b\") "
Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.102539 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97f970fb-1266-48a5-8b46-ef0d0cfb295b-utilities\") pod \"97f970fb-1266-48a5-8b46-ef0d0cfb295b\" (UID: \"97f970fb-1266-48a5-8b46-ef0d0cfb295b\") "
Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.108427 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97f970fb-1266-48a5-8b46-ef0d0cfb295b-utilities" (OuterVolumeSpecName: "utilities") pod "97f970fb-1266-48a5-8b46-ef0d0cfb295b" (UID: "97f970fb-1266-48a5-8b46-ef0d0cfb295b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.108927 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97f970fb-1266-48a5-8b46-ef0d0cfb295b-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.125117 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97f970fb-1266-48a5-8b46-ef0d0cfb295b-kube-api-access-br8gx" (OuterVolumeSpecName: "kube-api-access-br8gx") pod "97f970fb-1266-48a5-8b46-ef0d0cfb295b" (UID: "97f970fb-1266-48a5-8b46-ef0d0cfb295b"). InnerVolumeSpecName "kube-api-access-br8gx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.163512 4793 generic.go:334] "Generic (PLEG): container finished" podID="97f970fb-1266-48a5-8b46-ef0d0cfb295b" containerID="3f66862b17c2a077189c61e5e013e32edee25d54a22915444783a3b98f4fde17" exitCode=0
Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.163624 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dpn2t"
Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.165096 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97f970fb-1266-48a5-8b46-ef0d0cfb295b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "97f970fb-1266-48a5-8b46-ef0d0cfb295b" (UID: "97f970fb-1266-48a5-8b46-ef0d0cfb295b"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.165120 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dpn2t" event={"ID":"97f970fb-1266-48a5-8b46-ef0d0cfb295b","Type":"ContainerDied","Data":"3f66862b17c2a077189c61e5e013e32edee25d54a22915444783a3b98f4fde17"} Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.165157 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dpn2t" event={"ID":"97f970fb-1266-48a5-8b46-ef0d0cfb295b","Type":"ContainerDied","Data":"9944cf5ced1ca3984ef786273bb5077b159ba48779a4eb7067e7dfc073f94e49"} Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.165177 4793 scope.go:117] "RemoveContainer" containerID="3f66862b17c2a077189c61e5e013e32edee25d54a22915444783a3b98f4fde17" Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.212985 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-br8gx\" (UniqueName: \"kubernetes.io/projected/97f970fb-1266-48a5-8b46-ef0d0cfb295b-kube-api-access-br8gx\") on node \"crc\" DevicePath \"\"" Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.213021 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97f970fb-1266-48a5-8b46-ef0d0cfb295b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.279810 4793 scope.go:117] "RemoveContainer" containerID="68c415a0a5e31c98cf49f1772e28cd03fcf2e1c354386e9dbc96573e928da670" Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.317016 4793 scope.go:117] "RemoveContainer" containerID="04dfabbcbbd2646dd18200f44468e343d0f8960dc9c3b8a7fc2245d2df489849" Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.350063 4793 scope.go:117] "RemoveContainer" containerID="3f66862b17c2a077189c61e5e013e32edee25d54a22915444783a3b98f4fde17" Jan 27 22:33:36 crc kubenswrapper[4793]: E0127 22:33:36.350513 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f66862b17c2a077189c61e5e013e32edee25d54a22915444783a3b98f4fde17\": container with ID starting with 3f66862b17c2a077189c61e5e013e32edee25d54a22915444783a3b98f4fde17 not found: ID does not exist" containerID="3f66862b17c2a077189c61e5e013e32edee25d54a22915444783a3b98f4fde17" Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.350566 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f66862b17c2a077189c61e5e013e32edee25d54a22915444783a3b98f4fde17"} err="failed to get container status \"3f66862b17c2a077189c61e5e013e32edee25d54a22915444783a3b98f4fde17\": rpc error: code = NotFound desc = could not find container \"3f66862b17c2a077189c61e5e013e32edee25d54a22915444783a3b98f4fde17\": container with ID starting with 3f66862b17c2a077189c61e5e013e32edee25d54a22915444783a3b98f4fde17 not found: ID does not exist" Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.350589 4793 scope.go:117] "RemoveContainer" containerID="68c415a0a5e31c98cf49f1772e28cd03fcf2e1c354386e9dbc96573e928da670" Jan 27 22:33:36 crc kubenswrapper[4793]: E0127 22:33:36.351223 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68c415a0a5e31c98cf49f1772e28cd03fcf2e1c354386e9dbc96573e928da670\": container with ID starting with 
68c415a0a5e31c98cf49f1772e28cd03fcf2e1c354386e9dbc96573e928da670 not found: ID does not exist" containerID="68c415a0a5e31c98cf49f1772e28cd03fcf2e1c354386e9dbc96573e928da670" Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.351253 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68c415a0a5e31c98cf49f1772e28cd03fcf2e1c354386e9dbc96573e928da670"} err="failed to get container status \"68c415a0a5e31c98cf49f1772e28cd03fcf2e1c354386e9dbc96573e928da670\": rpc error: code = NotFound desc = could not find container \"68c415a0a5e31c98cf49f1772e28cd03fcf2e1c354386e9dbc96573e928da670\": container with ID starting with 68c415a0a5e31c98cf49f1772e28cd03fcf2e1c354386e9dbc96573e928da670 not found: ID does not exist" Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.351275 4793 scope.go:117] "RemoveContainer" containerID="04dfabbcbbd2646dd18200f44468e343d0f8960dc9c3b8a7fc2245d2df489849" Jan 27 22:33:36 crc kubenswrapper[4793]: E0127 22:33:36.351662 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04dfabbcbbd2646dd18200f44468e343d0f8960dc9c3b8a7fc2245d2df489849\": container with ID starting with 04dfabbcbbd2646dd18200f44468e343d0f8960dc9c3b8a7fc2245d2df489849 not found: ID does not exist" containerID="04dfabbcbbd2646dd18200f44468e343d0f8960dc9c3b8a7fc2245d2df489849" Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.351694 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04dfabbcbbd2646dd18200f44468e343d0f8960dc9c3b8a7fc2245d2df489849"} err="failed to get container status \"04dfabbcbbd2646dd18200f44468e343d0f8960dc9c3b8a7fc2245d2df489849\": rpc error: code = NotFound desc = could not find container \"04dfabbcbbd2646dd18200f44468e343d0f8960dc9c3b8a7fc2245d2df489849\": container with ID starting with 04dfabbcbbd2646dd18200f44468e343d0f8960dc9c3b8a7fc2245d2df489849 not found: ID does not exist" Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.501077 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dpn2t"] Jan 27 22:33:36 crc kubenswrapper[4793]: I0127 22:33:36.534019 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dpn2t"] Jan 27 22:33:37 crc kubenswrapper[4793]: I0127 22:33:37.803857 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:33:37 crc kubenswrapper[4793]: E0127 22:33:37.804450 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:33:37 crc kubenswrapper[4793]: I0127 22:33:37.817013 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97f970fb-1266-48a5-8b46-ef0d0cfb295b" path="/var/lib/kubelet/pods/97f970fb-1266-48a5-8b46-ef0d0cfb295b/volumes" Jan 27 22:33:39 crc kubenswrapper[4793]: I0127 22:33:39.803531 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:33:39 crc kubenswrapper[4793]: E0127 22:33:39.804666 4793 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:33:48 crc kubenswrapper[4793]: I0127 22:33:48.805224 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:33:48 crc kubenswrapper[4793]: E0127 22:33:48.806926 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:33:51 crc kubenswrapper[4793]: I0127 22:33:51.804640 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:33:51 crc kubenswrapper[4793]: E0127 22:33:51.805971 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:34:02 crc kubenswrapper[4793]: I0127 22:34:02.804198 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:34:02 crc kubenswrapper[4793]: E0127 22:34:02.806142 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:34:06 crc kubenswrapper[4793]: I0127 22:34:06.803221 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:34:06 crc kubenswrapper[4793]: E0127 22:34:06.803817 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:34:13 crc kubenswrapper[4793]: I0127 22:34:13.804477 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:34:13 crc kubenswrapper[4793]: E0127 22:34:13.805693 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:34:17 crc kubenswrapper[4793]: 
I0127 22:34:17.804958 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:34:17 crc kubenswrapper[4793]: E0127 22:34:17.805758 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:34:24 crc kubenswrapper[4793]: I0127 22:34:24.803841 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:34:24 crc kubenswrapper[4793]: E0127 22:34:24.804875 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:34:32 crc kubenswrapper[4793]: I0127 22:34:32.810442 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:34:32 crc kubenswrapper[4793]: E0127 22:34:32.811799 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:34:38 crc kubenswrapper[4793]: I0127 22:34:38.804454 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:34:38 crc kubenswrapper[4793]: E0127 22:34:38.805707 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:34:43 crc kubenswrapper[4793]: I0127 22:34:43.805567 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:34:43 crc kubenswrapper[4793]: E0127 22:34:43.807740 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:34:51 crc kubenswrapper[4793]: I0127 22:34:51.802773 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:34:51 crc kubenswrapper[4793]: E0127 22:34:51.803424 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:34:56 crc kubenswrapper[4793]: I0127 22:34:56.804208 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:34:56 crc kubenswrapper[4793]: E0127 22:34:56.805319 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:35:06 crc kubenswrapper[4793]: I0127 22:35:06.805756 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:35:06 crc kubenswrapper[4793]: E0127 22:35:06.806818 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:35:09 crc kubenswrapper[4793]: I0127 22:35:09.803699 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:35:09 crc kubenswrapper[4793]: E0127 22:35:09.804628 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:35:17 crc kubenswrapper[4793]: I0127 22:35:17.803999 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:35:17 crc kubenswrapper[4793]: E0127 22:35:17.805033 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:35:21 crc kubenswrapper[4793]: I0127 22:35:21.809713 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:35:21 crc kubenswrapper[4793]: E0127 22:35:21.810356 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:35:30 crc kubenswrapper[4793]: I0127 22:35:30.804311 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:35:30 crc 
kubenswrapper[4793]: E0127 22:35:30.805228 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:35:34 crc kubenswrapper[4793]: I0127 22:35:34.804063 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:35:34 crc kubenswrapper[4793]: E0127 22:35:34.804791 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:35:43 crc kubenswrapper[4793]: I0127 22:35:43.803704 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:35:43 crc kubenswrapper[4793]: E0127 22:35:43.804594 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:35:47 crc kubenswrapper[4793]: I0127 22:35:47.804257 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:35:47 crc kubenswrapper[4793]: E0127 22:35:47.805221 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:35:56 crc kubenswrapper[4793]: I0127 22:35:56.804373 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a" Jan 27 22:35:57 crc kubenswrapper[4793]: I0127 22:35:57.764452 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"9b3b13faabad6d944a8ad6c69d8f90df09b3942d8c601fe26f08eb8bbc2c79d7"} Jan 27 22:35:59 crc kubenswrapper[4793]: I0127 22:35:59.927266 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:35:59 crc kubenswrapper[4793]: E0127 22:35:59.928246 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:36:11 crc kubenswrapper[4793]: I0127 22:36:11.803963 4793 scope.go:117] "RemoveContainer" 
containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:36:12 crc kubenswrapper[4793]: I0127 22:36:12.082566 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8"} Jan 27 22:36:13 crc kubenswrapper[4793]: I0127 22:36:13.243374 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 22:36:15 crc kubenswrapper[4793]: I0127 22:36:15.113787 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" exitCode=1 Jan 27 22:36:15 crc kubenswrapper[4793]: I0127 22:36:15.113858 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8"} Jan 27 22:36:15 crc kubenswrapper[4793]: I0127 22:36:15.114197 4793 scope.go:117] "RemoveContainer" containerID="f75137c03730a22d68d13557156d15bc51e5e020548836ec9062959ad88e77de" Jan 27 22:36:15 crc kubenswrapper[4793]: I0127 22:36:15.115302 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:36:15 crc kubenswrapper[4793]: E0127 22:36:15.115833 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:36:18 crc kubenswrapper[4793]: I0127 22:36:18.243283 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:36:18 crc kubenswrapper[4793]: I0127 22:36:18.243766 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:36:18 crc kubenswrapper[4793]: I0127 22:36:18.243781 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:36:18 crc kubenswrapper[4793]: I0127 22:36:18.244776 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:36:18 crc kubenswrapper[4793]: E0127 22:36:18.245143 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:36:32 crc kubenswrapper[4793]: I0127 22:36:32.803125 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:36:32 crc kubenswrapper[4793]: E0127 22:36:32.804012 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" 
podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:36:43 crc kubenswrapper[4793]: I0127 22:36:43.804975 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:36:43 crc kubenswrapper[4793]: E0127 22:36:43.805945 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:36:54 crc kubenswrapper[4793]: I0127 22:36:54.804819 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:36:54 crc kubenswrapper[4793]: E0127 22:36:54.805852 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:37:06 crc kubenswrapper[4793]: I0127 22:37:06.804074 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:37:06 crc kubenswrapper[4793]: E0127 22:37:06.805218 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:37:20 crc kubenswrapper[4793]: I0127 22:37:20.804280 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:37:20 crc kubenswrapper[4793]: E0127 22:37:20.805625 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:37:33 crc kubenswrapper[4793]: I0127 22:37:33.803688 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:37:33 crc kubenswrapper[4793]: E0127 22:37:33.804509 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:37:47 crc kubenswrapper[4793]: I0127 22:37:47.803049 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:37:47 crc kubenswrapper[4793]: E0127 22:37:47.803837 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" 
podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:37:59 crc kubenswrapper[4793]: I0127 22:37:59.804524 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:37:59 crc kubenswrapper[4793]: E0127 22:37:59.805623 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.386233 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fhgxg"] Jan 27 22:38:11 crc kubenswrapper[4793]: E0127 22:38:11.387749 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97f970fb-1266-48a5-8b46-ef0d0cfb295b" containerName="extract-content" Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.387770 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="97f970fb-1266-48a5-8b46-ef0d0cfb295b" containerName="extract-content" Jan 27 22:38:11 crc kubenswrapper[4793]: E0127 22:38:11.387786 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97f970fb-1266-48a5-8b46-ef0d0cfb295b" containerName="extract-utilities" Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.387792 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="97f970fb-1266-48a5-8b46-ef0d0cfb295b" containerName="extract-utilities" Jan 27 22:38:11 crc kubenswrapper[4793]: E0127 22:38:11.387821 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97f970fb-1266-48a5-8b46-ef0d0cfb295b" containerName="registry-server" Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.387885 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="97f970fb-1266-48a5-8b46-ef0d0cfb295b" containerName="registry-server" Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.388442 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="97f970fb-1266-48a5-8b46-ef0d0cfb295b" containerName="registry-server" Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.392088 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.444921 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fhgxg"] Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.574791 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5kzq\" (UniqueName: \"kubernetes.io/projected/70474c62-b937-475b-b38c-a69bf3979de0-kube-api-access-d5kzq\") pod \"community-operators-fhgxg\" (UID: \"70474c62-b937-475b-b38c-a69bf3979de0\") " pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.574865 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70474c62-b937-475b-b38c-a69bf3979de0-catalog-content\") pod \"community-operators-fhgxg\" (UID: \"70474c62-b937-475b-b38c-a69bf3979de0\") " pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.574960 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70474c62-b937-475b-b38c-a69bf3979de0-utilities\") pod \"community-operators-fhgxg\" (UID: \"70474c62-b937-475b-b38c-a69bf3979de0\") " pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.677097 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5kzq\" (UniqueName: \"kubernetes.io/projected/70474c62-b937-475b-b38c-a69bf3979de0-kube-api-access-d5kzq\") pod \"community-operators-fhgxg\" (UID: \"70474c62-b937-475b-b38c-a69bf3979de0\") " pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.677146 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70474c62-b937-475b-b38c-a69bf3979de0-catalog-content\") pod \"community-operators-fhgxg\" (UID: \"70474c62-b937-475b-b38c-a69bf3979de0\") " pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.677201 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70474c62-b937-475b-b38c-a69bf3979de0-utilities\") pod \"community-operators-fhgxg\" (UID: \"70474c62-b937-475b-b38c-a69bf3979de0\") " pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.677839 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70474c62-b937-475b-b38c-a69bf3979de0-catalog-content\") pod \"community-operators-fhgxg\" (UID: \"70474c62-b937-475b-b38c-a69bf3979de0\") " pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.677879 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70474c62-b937-475b-b38c-a69bf3979de0-utilities\") pod \"community-operators-fhgxg\" (UID: \"70474c62-b937-475b-b38c-a69bf3979de0\") " pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.699885 4793 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-d5kzq\" (UniqueName: \"kubernetes.io/projected/70474c62-b937-475b-b38c-a69bf3979de0-kube-api-access-d5kzq\") pod \"community-operators-fhgxg\" (UID: \"70474c62-b937-475b-b38c-a69bf3979de0\") " pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:11 crc kubenswrapper[4793]: I0127 22:38:11.747156 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:12 crc kubenswrapper[4793]: I0127 22:38:12.343050 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fhgxg"] Jan 27 22:38:12 crc kubenswrapper[4793]: I0127 22:38:12.585869 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fhgxg" event={"ID":"70474c62-b937-475b-b38c-a69bf3979de0","Type":"ContainerStarted","Data":"3e5f17e7f8e524a8a636a9e42ea3ea1a5fa3e903212dfb67e8d9311766911767"} Jan 27 22:38:12 crc kubenswrapper[4793]: I0127 22:38:12.586147 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fhgxg" event={"ID":"70474c62-b937-475b-b38c-a69bf3979de0","Type":"ContainerStarted","Data":"ce9d8432a8374b3fc406687fe2f8de1ef71244e879ca044e8a02a28d29336c86"} Jan 27 22:38:13 crc kubenswrapper[4793]: I0127 22:38:13.598747 4793 generic.go:334] "Generic (PLEG): container finished" podID="70474c62-b937-475b-b38c-a69bf3979de0" containerID="3e5f17e7f8e524a8a636a9e42ea3ea1a5fa3e903212dfb67e8d9311766911767" exitCode=0 Jan 27 22:38:13 crc kubenswrapper[4793]: I0127 22:38:13.598803 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fhgxg" event={"ID":"70474c62-b937-475b-b38c-a69bf3979de0","Type":"ContainerDied","Data":"3e5f17e7f8e524a8a636a9e42ea3ea1a5fa3e903212dfb67e8d9311766911767"} Jan 27 22:38:13 crc kubenswrapper[4793]: I0127 22:38:13.599063 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fhgxg" event={"ID":"70474c62-b937-475b-b38c-a69bf3979de0","Type":"ContainerStarted","Data":"45f36dc8d6741d2d1686c52584a22d2b953ded1bce59f6a7c06f18a9347f1282"} Jan 27 22:38:13 crc kubenswrapper[4793]: I0127 22:38:13.804078 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:38:13 crc kubenswrapper[4793]: E0127 22:38:13.804342 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:38:14 crc kubenswrapper[4793]: I0127 22:38:14.617360 4793 generic.go:334] "Generic (PLEG): container finished" podID="70474c62-b937-475b-b38c-a69bf3979de0" containerID="45f36dc8d6741d2d1686c52584a22d2b953ded1bce59f6a7c06f18a9347f1282" exitCode=0 Jan 27 22:38:14 crc kubenswrapper[4793]: I0127 22:38:14.617470 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fhgxg" event={"ID":"70474c62-b937-475b-b38c-a69bf3979de0","Type":"ContainerDied","Data":"45f36dc8d6741d2d1686c52584a22d2b953ded1bce59f6a7c06f18a9347f1282"} Jan 27 22:38:15 crc kubenswrapper[4793]: I0127 22:38:15.643791 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-fhgxg" event={"ID":"70474c62-b937-475b-b38c-a69bf3979de0","Type":"ContainerStarted","Data":"cc760cd08f83e3f81a326bc41dda4ba60fdbf74a192bf2084940cd1548d0e619"} Jan 27 22:38:15 crc kubenswrapper[4793]: I0127 22:38:15.673892 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fhgxg" podStartSLOduration=2.220394428 podStartE2EDuration="4.673858753s" podCreationTimestamp="2026-01-27 22:38:11 +0000 UTC" firstStartedPulling="2026-01-27 22:38:12.587891488 +0000 UTC m=+9317.978144654" lastFinishedPulling="2026-01-27 22:38:15.041355823 +0000 UTC m=+9320.431608979" observedRunningTime="2026-01-27 22:38:15.663115784 +0000 UTC m=+9321.053368950" watchObservedRunningTime="2026-01-27 22:38:15.673858753 +0000 UTC m=+9321.064111919" Jan 27 22:38:21 crc kubenswrapper[4793]: I0127 22:38:21.747933 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:21 crc kubenswrapper[4793]: I0127 22:38:21.748612 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:21 crc kubenswrapper[4793]: I0127 22:38:21.825444 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:22 crc kubenswrapper[4793]: I0127 22:38:22.754370 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:38:22 crc kubenswrapper[4793]: I0127 22:38:22.754489 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:38:22 crc kubenswrapper[4793]: I0127 22:38:22.817044 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:22 crc kubenswrapper[4793]: I0127 22:38:22.873338 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fhgxg"] Jan 27 22:38:24 crc kubenswrapper[4793]: I0127 22:38:24.785629 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fhgxg" podUID="70474c62-b937-475b-b38c-a69bf3979de0" containerName="registry-server" containerID="cri-o://cc760cd08f83e3f81a326bc41dda4ba60fdbf74a192bf2084940cd1548d0e619" gracePeriod=2 Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.294603 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.371097 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70474c62-b937-475b-b38c-a69bf3979de0-utilities\") pod \"70474c62-b937-475b-b38c-a69bf3979de0\" (UID: \"70474c62-b937-475b-b38c-a69bf3979de0\") " Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.371361 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70474c62-b937-475b-b38c-a69bf3979de0-catalog-content\") pod \"70474c62-b937-475b-b38c-a69bf3979de0\" (UID: \"70474c62-b937-475b-b38c-a69bf3979de0\") " Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.371415 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d5kzq\" (UniqueName: \"kubernetes.io/projected/70474c62-b937-475b-b38c-a69bf3979de0-kube-api-access-d5kzq\") pod \"70474c62-b937-475b-b38c-a69bf3979de0\" (UID: \"70474c62-b937-475b-b38c-a69bf3979de0\") " Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.391423 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70474c62-b937-475b-b38c-a69bf3979de0-utilities" (OuterVolumeSpecName: "utilities") pod "70474c62-b937-475b-b38c-a69bf3979de0" (UID: "70474c62-b937-475b-b38c-a69bf3979de0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.399162 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70474c62-b937-475b-b38c-a69bf3979de0-kube-api-access-d5kzq" (OuterVolumeSpecName: "kube-api-access-d5kzq") pod "70474c62-b937-475b-b38c-a69bf3979de0" (UID: "70474c62-b937-475b-b38c-a69bf3979de0"). InnerVolumeSpecName "kube-api-access-d5kzq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.447886 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70474c62-b937-475b-b38c-a69bf3979de0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "70474c62-b937-475b-b38c-a69bf3979de0" (UID: "70474c62-b937-475b-b38c-a69bf3979de0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.473716 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d5kzq\" (UniqueName: \"kubernetes.io/projected/70474c62-b937-475b-b38c-a69bf3979de0-kube-api-access-d5kzq\") on node \"crc\" DevicePath \"\"" Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.473896 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70474c62-b937-475b-b38c-a69bf3979de0-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.473907 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70474c62-b937-475b-b38c-a69bf3979de0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.807747 4793 generic.go:334] "Generic (PLEG): container finished" podID="70474c62-b937-475b-b38c-a69bf3979de0" containerID="cc760cd08f83e3f81a326bc41dda4ba60fdbf74a192bf2084940cd1548d0e619" exitCode=0 Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.819141 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fhgxg" Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.831467 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fhgxg" event={"ID":"70474c62-b937-475b-b38c-a69bf3979de0","Type":"ContainerDied","Data":"cc760cd08f83e3f81a326bc41dda4ba60fdbf74a192bf2084940cd1548d0e619"} Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.831621 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fhgxg" event={"ID":"70474c62-b937-475b-b38c-a69bf3979de0","Type":"ContainerDied","Data":"ce9d8432a8374b3fc406687fe2f8de1ef71244e879ca044e8a02a28d29336c86"} Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.831695 4793 scope.go:117] "RemoveContainer" containerID="cc760cd08f83e3f81a326bc41dda4ba60fdbf74a192bf2084940cd1548d0e619" Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.895466 4793 scope.go:117] "RemoveContainer" containerID="45f36dc8d6741d2d1686c52584a22d2b953ded1bce59f6a7c06f18a9347f1282" Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.897927 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fhgxg"] Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.918612 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fhgxg"] Jan 27 22:38:25 crc kubenswrapper[4793]: I0127 22:38:25.925403 4793 scope.go:117] "RemoveContainer" containerID="3e5f17e7f8e524a8a636a9e42ea3ea1a5fa3e903212dfb67e8d9311766911767" Jan 27 22:38:26 crc kubenswrapper[4793]: I0127 22:38:26.001951 4793 scope.go:117] "RemoveContainer" containerID="cc760cd08f83e3f81a326bc41dda4ba60fdbf74a192bf2084940cd1548d0e619" Jan 27 22:38:26 crc kubenswrapper[4793]: E0127 22:38:26.002498 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc760cd08f83e3f81a326bc41dda4ba60fdbf74a192bf2084940cd1548d0e619\": container with ID starting with cc760cd08f83e3f81a326bc41dda4ba60fdbf74a192bf2084940cd1548d0e619 not found: ID does not exist" containerID="cc760cd08f83e3f81a326bc41dda4ba60fdbf74a192bf2084940cd1548d0e619" Jan 27 22:38:26 crc kubenswrapper[4793]: I0127 22:38:26.002541 
4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc760cd08f83e3f81a326bc41dda4ba60fdbf74a192bf2084940cd1548d0e619"} err="failed to get container status \"cc760cd08f83e3f81a326bc41dda4ba60fdbf74a192bf2084940cd1548d0e619\": rpc error: code = NotFound desc = could not find container \"cc760cd08f83e3f81a326bc41dda4ba60fdbf74a192bf2084940cd1548d0e619\": container with ID starting with cc760cd08f83e3f81a326bc41dda4ba60fdbf74a192bf2084940cd1548d0e619 not found: ID does not exist" Jan 27 22:38:26 crc kubenswrapper[4793]: I0127 22:38:26.002593 4793 scope.go:117] "RemoveContainer" containerID="45f36dc8d6741d2d1686c52584a22d2b953ded1bce59f6a7c06f18a9347f1282" Jan 27 22:38:26 crc kubenswrapper[4793]: E0127 22:38:26.003155 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45f36dc8d6741d2d1686c52584a22d2b953ded1bce59f6a7c06f18a9347f1282\": container with ID starting with 45f36dc8d6741d2d1686c52584a22d2b953ded1bce59f6a7c06f18a9347f1282 not found: ID does not exist" containerID="45f36dc8d6741d2d1686c52584a22d2b953ded1bce59f6a7c06f18a9347f1282" Jan 27 22:38:26 crc kubenswrapper[4793]: I0127 22:38:26.003199 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45f36dc8d6741d2d1686c52584a22d2b953ded1bce59f6a7c06f18a9347f1282"} err="failed to get container status \"45f36dc8d6741d2d1686c52584a22d2b953ded1bce59f6a7c06f18a9347f1282\": rpc error: code = NotFound desc = could not find container \"45f36dc8d6741d2d1686c52584a22d2b953ded1bce59f6a7c06f18a9347f1282\": container with ID starting with 45f36dc8d6741d2d1686c52584a22d2b953ded1bce59f6a7c06f18a9347f1282 not found: ID does not exist" Jan 27 22:38:26 crc kubenswrapper[4793]: I0127 22:38:26.003230 4793 scope.go:117] "RemoveContainer" containerID="3e5f17e7f8e524a8a636a9e42ea3ea1a5fa3e903212dfb67e8d9311766911767" Jan 27 22:38:26 crc kubenswrapper[4793]: E0127 22:38:26.003509 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e5f17e7f8e524a8a636a9e42ea3ea1a5fa3e903212dfb67e8d9311766911767\": container with ID starting with 3e5f17e7f8e524a8a636a9e42ea3ea1a5fa3e903212dfb67e8d9311766911767 not found: ID does not exist" containerID="3e5f17e7f8e524a8a636a9e42ea3ea1a5fa3e903212dfb67e8d9311766911767" Jan 27 22:38:26 crc kubenswrapper[4793]: I0127 22:38:26.003541 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e5f17e7f8e524a8a636a9e42ea3ea1a5fa3e903212dfb67e8d9311766911767"} err="failed to get container status \"3e5f17e7f8e524a8a636a9e42ea3ea1a5fa3e903212dfb67e8d9311766911767\": rpc error: code = NotFound desc = could not find container \"3e5f17e7f8e524a8a636a9e42ea3ea1a5fa3e903212dfb67e8d9311766911767\": container with ID starting with 3e5f17e7f8e524a8a636a9e42ea3ea1a5fa3e903212dfb67e8d9311766911767 not found: ID does not exist" Jan 27 22:38:27 crc kubenswrapper[4793]: I0127 22:38:27.803846 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:38:27 crc kubenswrapper[4793]: E0127 22:38:27.804504 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" 
podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:38:27 crc kubenswrapper[4793]: I0127 22:38:27.828829 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70474c62-b937-475b-b38c-a69bf3979de0" path="/var/lib/kubelet/pods/70474c62-b937-475b-b38c-a69bf3979de0/volumes" Jan 27 22:38:31 crc kubenswrapper[4793]: I0127 22:38:31.850461 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dx8x7"] Jan 27 22:38:31 crc kubenswrapper[4793]: E0127 22:38:31.851345 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70474c62-b937-475b-b38c-a69bf3979de0" containerName="extract-content" Jan 27 22:38:31 crc kubenswrapper[4793]: I0127 22:38:31.851358 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="70474c62-b937-475b-b38c-a69bf3979de0" containerName="extract-content" Jan 27 22:38:31 crc kubenswrapper[4793]: E0127 22:38:31.851383 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70474c62-b937-475b-b38c-a69bf3979de0" containerName="registry-server" Jan 27 22:38:31 crc kubenswrapper[4793]: I0127 22:38:31.851389 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="70474c62-b937-475b-b38c-a69bf3979de0" containerName="registry-server" Jan 27 22:38:31 crc kubenswrapper[4793]: E0127 22:38:31.851401 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70474c62-b937-475b-b38c-a69bf3979de0" containerName="extract-utilities" Jan 27 22:38:31 crc kubenswrapper[4793]: I0127 22:38:31.851409 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="70474c62-b937-475b-b38c-a69bf3979de0" containerName="extract-utilities" Jan 27 22:38:31 crc kubenswrapper[4793]: I0127 22:38:31.851619 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="70474c62-b937-475b-b38c-a69bf3979de0" containerName="registry-server" Jan 27 22:38:31 crc kubenswrapper[4793]: I0127 22:38:31.853092 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dx8x7" Jan 27 22:38:31 crc kubenswrapper[4793]: I0127 22:38:31.873930 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dx8x7"] Jan 27 22:38:31 crc kubenswrapper[4793]: I0127 22:38:31.888106 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87b92dc8-59fe-4c74-a3cd-821e862aae88-utilities\") pod \"redhat-operators-dx8x7\" (UID: \"87b92dc8-59fe-4c74-a3cd-821e862aae88\") " pod="openshift-marketplace/redhat-operators-dx8x7" Jan 27 22:38:31 crc kubenswrapper[4793]: I0127 22:38:31.888169 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-294ll\" (UniqueName: \"kubernetes.io/projected/87b92dc8-59fe-4c74-a3cd-821e862aae88-kube-api-access-294ll\") pod \"redhat-operators-dx8x7\" (UID: \"87b92dc8-59fe-4c74-a3cd-821e862aae88\") " pod="openshift-marketplace/redhat-operators-dx8x7" Jan 27 22:38:31 crc kubenswrapper[4793]: I0127 22:38:31.888747 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87b92dc8-59fe-4c74-a3cd-821e862aae88-catalog-content\") pod \"redhat-operators-dx8x7\" (UID: \"87b92dc8-59fe-4c74-a3cd-821e862aae88\") " pod="openshift-marketplace/redhat-operators-dx8x7" Jan 27 22:38:31 crc kubenswrapper[4793]: I0127 22:38:31.991589 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87b92dc8-59fe-4c74-a3cd-821e862aae88-utilities\") pod \"redhat-operators-dx8x7\" (UID: \"87b92dc8-59fe-4c74-a3cd-821e862aae88\") " pod="openshift-marketplace/redhat-operators-dx8x7" Jan 27 22:38:31 crc kubenswrapper[4793]: I0127 22:38:31.991840 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-294ll\" (UniqueName: \"kubernetes.io/projected/87b92dc8-59fe-4c74-a3cd-821e862aae88-kube-api-access-294ll\") pod \"redhat-operators-dx8x7\" (UID: \"87b92dc8-59fe-4c74-a3cd-821e862aae88\") " pod="openshift-marketplace/redhat-operators-dx8x7" Jan 27 22:38:31 crc kubenswrapper[4793]: I0127 22:38:31.992177 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87b92dc8-59fe-4c74-a3cd-821e862aae88-utilities\") pod \"redhat-operators-dx8x7\" (UID: \"87b92dc8-59fe-4c74-a3cd-821e862aae88\") " pod="openshift-marketplace/redhat-operators-dx8x7" Jan 27 22:38:31 crc kubenswrapper[4793]: I0127 22:38:31.992475 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87b92dc8-59fe-4c74-a3cd-821e862aae88-catalog-content\") pod \"redhat-operators-dx8x7\" (UID: \"87b92dc8-59fe-4c74-a3cd-821e862aae88\") " pod="openshift-marketplace/redhat-operators-dx8x7" Jan 27 22:38:31 crc kubenswrapper[4793]: I0127 22:38:31.993095 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87b92dc8-59fe-4c74-a3cd-821e862aae88-catalog-content\") pod \"redhat-operators-dx8x7\" (UID: \"87b92dc8-59fe-4c74-a3cd-821e862aae88\") " pod="openshift-marketplace/redhat-operators-dx8x7" Jan 27 22:38:32 crc kubenswrapper[4793]: I0127 22:38:32.017926 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-294ll\" (UniqueName: \"kubernetes.io/projected/87b92dc8-59fe-4c74-a3cd-821e862aae88-kube-api-access-294ll\") pod \"redhat-operators-dx8x7\" (UID: \"87b92dc8-59fe-4c74-a3cd-821e862aae88\") " pod="openshift-marketplace/redhat-operators-dx8x7" Jan 27 22:38:32 crc kubenswrapper[4793]: I0127 22:38:32.187353 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dx8x7" Jan 27 22:38:32 crc kubenswrapper[4793]: I0127 22:38:32.667421 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dx8x7"] Jan 27 22:38:32 crc kubenswrapper[4793]: I0127 22:38:32.932528 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dx8x7" event={"ID":"87b92dc8-59fe-4c74-a3cd-821e862aae88","Type":"ContainerStarted","Data":"ebd23b41c464346e9a65aa8066d93ae11dc3eb16f0fa9a2390b02fcaca6ae674"} Jan 27 22:38:32 crc kubenswrapper[4793]: I0127 22:38:32.932600 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dx8x7" event={"ID":"87b92dc8-59fe-4c74-a3cd-821e862aae88","Type":"ContainerStarted","Data":"19ffd01f18f903545f49df13ec49cd6d1965f92e8e9acbbbb4927eef43360385"} Jan 27 22:38:33 crc kubenswrapper[4793]: I0127 22:38:33.947350 4793 generic.go:334] "Generic (PLEG): container finished" podID="87b92dc8-59fe-4c74-a3cd-821e862aae88" containerID="ebd23b41c464346e9a65aa8066d93ae11dc3eb16f0fa9a2390b02fcaca6ae674" exitCode=0 Jan 27 22:38:33 crc kubenswrapper[4793]: I0127 22:38:33.948241 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dx8x7" event={"ID":"87b92dc8-59fe-4c74-a3cd-821e862aae88","Type":"ContainerDied","Data":"ebd23b41c464346e9a65aa8066d93ae11dc3eb16f0fa9a2390b02fcaca6ae674"} Jan 27 22:38:33 crc kubenswrapper[4793]: I0127 22:38:33.953021 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 22:38:34 crc kubenswrapper[4793]: I0127 22:38:34.963532 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dx8x7" event={"ID":"87b92dc8-59fe-4c74-a3cd-821e862aae88","Type":"ContainerStarted","Data":"a3eb5ae8b51799ba55b936d560e50d29d8f2fddd1cc663fe91feab6aacb99047"} Jan 27 22:38:39 crc kubenswrapper[4793]: I0127 22:38:39.004974 4793 generic.go:334] "Generic (PLEG): container finished" podID="87b92dc8-59fe-4c74-a3cd-821e862aae88" containerID="a3eb5ae8b51799ba55b936d560e50d29d8f2fddd1cc663fe91feab6aacb99047" exitCode=0 Jan 27 22:38:39 crc kubenswrapper[4793]: I0127 22:38:39.005075 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dx8x7" event={"ID":"87b92dc8-59fe-4c74-a3cd-821e862aae88","Type":"ContainerDied","Data":"a3eb5ae8b51799ba55b936d560e50d29d8f2fddd1cc663fe91feab6aacb99047"} Jan 27 22:38:40 crc kubenswrapper[4793]: I0127 22:38:40.020916 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dx8x7" event={"ID":"87b92dc8-59fe-4c74-a3cd-821e862aae88","Type":"ContainerStarted","Data":"2edead9b5049c4894303630ee7d5aa0268c727d286e408e233a43479569738b5"} Jan 27 22:38:40 crc kubenswrapper[4793]: I0127 22:38:40.062598 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dx8x7" podStartSLOduration=3.597731204 podStartE2EDuration="9.062577637s" podCreationTimestamp="2026-01-27 22:38:31 +0000 UTC" firstStartedPulling="2026-01-27 
Jan 27 22:38:41 crc kubenswrapper[4793]: I0127 22:38:41.803977 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8"
Jan 27 22:38:41 crc kubenswrapper[4793]: E0127 22:38:41.804892 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:38:42 crc kubenswrapper[4793]: I0127 22:38:42.187915 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dx8x7"
Jan 27 22:38:42 crc kubenswrapper[4793]: I0127 22:38:42.189200 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dx8x7"
Jan 27 22:38:43 crc kubenswrapper[4793]: I0127 22:38:43.236844 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dx8x7" podUID="87b92dc8-59fe-4c74-a3cd-821e862aae88" containerName="registry-server" probeResult="failure" output=<
Jan 27 22:38:43 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s
Jan 27 22:38:43 crc kubenswrapper[4793]: >
Jan 27 22:38:52 crc kubenswrapper[4793]: I0127 22:38:52.276460 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dx8x7"
Jan 27 22:38:52 crc kubenswrapper[4793]: I0127 22:38:52.367822 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dx8x7"
Jan 27 22:38:52 crc kubenswrapper[4793]: I0127 22:38:52.519193 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dx8x7"]
Jan 27 22:38:52 crc kubenswrapper[4793]: I0127 22:38:52.753600 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 22:38:52 crc kubenswrapper[4793]: I0127 22:38:52.753664 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 22:38:52 crc kubenswrapper[4793]: I0127 22:38:52.803606 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8"
Jan 27 22:38:52 crc kubenswrapper[4793]: E0127 22:38:52.803859 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
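[Annotation] The startup-probe output above, "timeout: failed to connect service \":50051\" within 1s", is the signature of a gRPC health check against the registry-server port; OLM catalog pods expose the standard grpc.health.v1 service on :50051. The exact probe mechanism is an assumption here (it is not visible in the log), but a minimal Go client performing the equivalent check would look like this:

    package main

    import (
        "context"
        "fmt"
        "os"
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        healthpb "google.golang.org/grpc/health/grpc_health_v1"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()

        // Blocking dial so a dead endpoint surfaces as the 1s timeout seen above.
        conn, err := grpc.DialContext(ctx, "localhost:50051",
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithBlock())
        if err != nil {
            fmt.Printf("timeout: failed to connect service %q within 1s\n", ":50051")
            os.Exit(1)
        }
        defer conn.Close()

        resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
        if err != nil || resp.Status != healthpb.HealthCheckResponse_SERVING {
            os.Exit(1) // the kubelet records this as probeResult="failure"
        }
    }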
podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:38:54 crc kubenswrapper[4793]: I0127 22:38:54.163513 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dx8x7" podUID="87b92dc8-59fe-4c74-a3cd-821e862aae88" containerName="registry-server" containerID="cri-o://2edead9b5049c4894303630ee7d5aa0268c727d286e408e233a43479569738b5" gracePeriod=2 Jan 27 22:38:54 crc kubenswrapper[4793]: I0127 22:38:54.729239 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dx8x7" Jan 27 22:38:54 crc kubenswrapper[4793]: I0127 22:38:54.739426 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87b92dc8-59fe-4c74-a3cd-821e862aae88-utilities\") pod \"87b92dc8-59fe-4c74-a3cd-821e862aae88\" (UID: \"87b92dc8-59fe-4c74-a3cd-821e862aae88\") " Jan 27 22:38:54 crc kubenswrapper[4793]: I0127 22:38:54.739523 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87b92dc8-59fe-4c74-a3cd-821e862aae88-catalog-content\") pod \"87b92dc8-59fe-4c74-a3cd-821e862aae88\" (UID: \"87b92dc8-59fe-4c74-a3cd-821e862aae88\") " Jan 27 22:38:54 crc kubenswrapper[4793]: I0127 22:38:54.739614 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-294ll\" (UniqueName: \"kubernetes.io/projected/87b92dc8-59fe-4c74-a3cd-821e862aae88-kube-api-access-294ll\") pod \"87b92dc8-59fe-4c74-a3cd-821e862aae88\" (UID: \"87b92dc8-59fe-4c74-a3cd-821e862aae88\") " Jan 27 22:38:54 crc kubenswrapper[4793]: I0127 22:38:54.754816 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87b92dc8-59fe-4c74-a3cd-821e862aae88-kube-api-access-294ll" (OuterVolumeSpecName: "kube-api-access-294ll") pod "87b92dc8-59fe-4c74-a3cd-821e862aae88" (UID: "87b92dc8-59fe-4c74-a3cd-821e862aae88"). InnerVolumeSpecName "kube-api-access-294ll". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:38:54 crc kubenswrapper[4793]: I0127 22:38:54.757664 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87b92dc8-59fe-4c74-a3cd-821e862aae88-utilities" (OuterVolumeSpecName: "utilities") pod "87b92dc8-59fe-4c74-a3cd-821e862aae88" (UID: "87b92dc8-59fe-4c74-a3cd-821e862aae88"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:38:54 crc kubenswrapper[4793]: I0127 22:38:54.853981 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-294ll\" (UniqueName: \"kubernetes.io/projected/87b92dc8-59fe-4c74-a3cd-821e862aae88-kube-api-access-294ll\") on node \"crc\" DevicePath \"\"" Jan 27 22:38:54 crc kubenswrapper[4793]: I0127 22:38:54.854481 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87b92dc8-59fe-4c74-a3cd-821e862aae88-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.082803 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87b92dc8-59fe-4c74-a3cd-821e862aae88-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "87b92dc8-59fe-4c74-a3cd-821e862aae88" (UID: "87b92dc8-59fe-4c74-a3cd-821e862aae88"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.103823 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87b92dc8-59fe-4c74-a3cd-821e862aae88-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.172657 4793 generic.go:334] "Generic (PLEG): container finished" podID="87b92dc8-59fe-4c74-a3cd-821e862aae88" containerID="2edead9b5049c4894303630ee7d5aa0268c727d286e408e233a43479569738b5" exitCode=0 Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.172697 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dx8x7" event={"ID":"87b92dc8-59fe-4c74-a3cd-821e862aae88","Type":"ContainerDied","Data":"2edead9b5049c4894303630ee7d5aa0268c727d286e408e233a43479569738b5"} Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.172720 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dx8x7" event={"ID":"87b92dc8-59fe-4c74-a3cd-821e862aae88","Type":"ContainerDied","Data":"19ffd01f18f903545f49df13ec49cd6d1965f92e8e9acbbbb4927eef43360385"} Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.172737 4793 scope.go:117] "RemoveContainer" containerID="2edead9b5049c4894303630ee7d5aa0268c727d286e408e233a43479569738b5" Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.172850 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dx8x7" Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.202721 4793 scope.go:117] "RemoveContainer" containerID="a3eb5ae8b51799ba55b936d560e50d29d8f2fddd1cc663fe91feab6aacb99047" Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.231520 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dx8x7"] Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.232705 4793 scope.go:117] "RemoveContainer" containerID="ebd23b41c464346e9a65aa8066d93ae11dc3eb16f0fa9a2390b02fcaca6ae674" Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.248936 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dx8x7"] Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.276943 4793 scope.go:117] "RemoveContainer" containerID="2edead9b5049c4894303630ee7d5aa0268c727d286e408e233a43479569738b5" Jan 27 22:38:55 crc kubenswrapper[4793]: E0127 22:38:55.277419 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2edead9b5049c4894303630ee7d5aa0268c727d286e408e233a43479569738b5\": container with ID starting with 2edead9b5049c4894303630ee7d5aa0268c727d286e408e233a43479569738b5 not found: ID does not exist" containerID="2edead9b5049c4894303630ee7d5aa0268c727d286e408e233a43479569738b5" Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.277450 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2edead9b5049c4894303630ee7d5aa0268c727d286e408e233a43479569738b5"} err="failed to get container status \"2edead9b5049c4894303630ee7d5aa0268c727d286e408e233a43479569738b5\": rpc error: code = NotFound desc = could not find container \"2edead9b5049c4894303630ee7d5aa0268c727d286e408e233a43479569738b5\": container with ID starting with 2edead9b5049c4894303630ee7d5aa0268c727d286e408e233a43479569738b5 not found: ID does not exist" Jan 27 22:38:55 crc 
Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.277471 4793 scope.go:117] "RemoveContainer" containerID="a3eb5ae8b51799ba55b936d560e50d29d8f2fddd1cc663fe91feab6aacb99047"
Jan 27 22:38:55 crc kubenswrapper[4793]: E0127 22:38:55.278100 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3eb5ae8b51799ba55b936d560e50d29d8f2fddd1cc663fe91feab6aacb99047\": container with ID starting with a3eb5ae8b51799ba55b936d560e50d29d8f2fddd1cc663fe91feab6aacb99047 not found: ID does not exist" containerID="a3eb5ae8b51799ba55b936d560e50d29d8f2fddd1cc663fe91feab6aacb99047"
Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.278169 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3eb5ae8b51799ba55b936d560e50d29d8f2fddd1cc663fe91feab6aacb99047"} err="failed to get container status \"a3eb5ae8b51799ba55b936d560e50d29d8f2fddd1cc663fe91feab6aacb99047\": rpc error: code = NotFound desc = could not find container \"a3eb5ae8b51799ba55b936d560e50d29d8f2fddd1cc663fe91feab6aacb99047\": container with ID starting with a3eb5ae8b51799ba55b936d560e50d29d8f2fddd1cc663fe91feab6aacb99047 not found: ID does not exist"
Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.278209 4793 scope.go:117] "RemoveContainer" containerID="ebd23b41c464346e9a65aa8066d93ae11dc3eb16f0fa9a2390b02fcaca6ae674"
Jan 27 22:38:55 crc kubenswrapper[4793]: E0127 22:38:55.278625 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebd23b41c464346e9a65aa8066d93ae11dc3eb16f0fa9a2390b02fcaca6ae674\": container with ID starting with ebd23b41c464346e9a65aa8066d93ae11dc3eb16f0fa9a2390b02fcaca6ae674 not found: ID does not exist" containerID="ebd23b41c464346e9a65aa8066d93ae11dc3eb16f0fa9a2390b02fcaca6ae674"
Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.278680 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebd23b41c464346e9a65aa8066d93ae11dc3eb16f0fa9a2390b02fcaca6ae674"} err="failed to get container status \"ebd23b41c464346e9a65aa8066d93ae11dc3eb16f0fa9a2390b02fcaca6ae674\": rpc error: code = NotFound desc = could not find container \"ebd23b41c464346e9a65aa8066d93ae11dc3eb16f0fa9a2390b02fcaca6ae674\": container with ID starting with ebd23b41c464346e9a65aa8066d93ae11dc3eb16f0fa9a2390b02fcaca6ae674 not found: ID does not exist"
Jan 27 22:38:55 crc kubenswrapper[4793]: I0127 22:38:55.831638 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87b92dc8-59fe-4c74-a3cd-821e862aae88" path="/var/lib/kubelet/pods/87b92dc8-59fe-4c74-a3cd-821e862aae88/volumes"
Jan 27 22:39:04 crc kubenswrapper[4793]: I0127 22:39:04.804049 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8"
Jan 27 22:39:04 crc kubenswrapper[4793]: E0127 22:39:04.805194 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:39:18 crc kubenswrapper[4793]: I0127 22:39:18.803172 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8"
Jan 27 22:39:18 crc kubenswrapper[4793]: E0127 22:39:18.804076 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:39:22 crc kubenswrapper[4793]: I0127 22:39:22.753099 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 22:39:22 crc kubenswrapper[4793]: I0127 22:39:22.753764 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 22:39:22 crc kubenswrapper[4793]: I0127 22:39:22.753850 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn"
Jan 27 22:39:22 crc kubenswrapper[4793]: I0127 22:39:22.754922 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9b3b13faabad6d944a8ad6c69d8f90df09b3942d8c601fe26f08eb8bbc2c79d7"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 27 22:39:22 crc kubenswrapper[4793]: I0127 22:39:22.755003 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://9b3b13faabad6d944a8ad6c69d8f90df09b3942d8c601fe26f08eb8bbc2c79d7" gracePeriod=600
Jan 27 22:39:23 crc kubenswrapper[4793]: I0127 22:39:23.515025 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="9b3b13faabad6d944a8ad6c69d8f90df09b3942d8c601fe26f08eb8bbc2c79d7" exitCode=0
Jan 27 22:39:23 crc kubenswrapper[4793]: I0127 22:39:23.515070 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"9b3b13faabad6d944a8ad6c69d8f90df09b3942d8c601fe26f08eb8bbc2c79d7"}
Jan 27 22:39:23 crc kubenswrapper[4793]: I0127 22:39:23.515671 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027"}
Jan 27 22:39:23 crc kubenswrapper[4793]: I0127 22:39:23.515696 4793 scope.go:117] "RemoveContainer" containerID="557833b020bd326e34e710094a84c9402ea1d9cf061ef329963c713d5b0d555a"
Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.330400 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4nksf"]
Jan 27 22:39:25 crc kubenswrapper[4793]: E0127 22:39:25.331731 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87b92dc8-59fe-4c74-a3cd-821e862aae88" containerName="extract-content"
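[Annotation] The machine-config-daemon liveness failure above is a plain HTTP probe: the kubelet GETs http://127.0.0.1:8798/health, and a refused connection or a status outside 2xx/3xx counts as a failure; after enough consecutive failures the container is killed with its termination grace period (600s here) and restarted. A minimal Go equivalent of the check:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        client := &http.Client{Timeout: time.Second}
        resp, err := client.Get("http://127.0.0.1:8798/health")
        if err != nil {
            // This is the exact failure mode logged above:
            // "dial tcp 127.0.0.1:8798: connect: connection refused"
            fmt.Printf("Liveness probe status=failure output=%q\n", err.Error())
            return
        }
        defer resp.Body.Close()
        // The kubelet treats any 2xx/3xx status as healthy.
        if resp.StatusCode >= 200 && resp.StatusCode < 400 {
            fmt.Println("Liveness probe status=success")
        }
    }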
podUID="87b92dc8-59fe-4c74-a3cd-821e862aae88" containerName="extract-content" Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.331759 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="87b92dc8-59fe-4c74-a3cd-821e862aae88" containerName="extract-content" Jan 27 22:39:25 crc kubenswrapper[4793]: E0127 22:39:25.331795 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87b92dc8-59fe-4c74-a3cd-821e862aae88" containerName="registry-server" Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.331811 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="87b92dc8-59fe-4c74-a3cd-821e862aae88" containerName="registry-server" Jan 27 22:39:25 crc kubenswrapper[4793]: E0127 22:39:25.331844 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87b92dc8-59fe-4c74-a3cd-821e862aae88" containerName="extract-utilities" Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.331862 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="87b92dc8-59fe-4c74-a3cd-821e862aae88" containerName="extract-utilities" Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.332308 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="87b92dc8-59fe-4c74-a3cd-821e862aae88" containerName="registry-server" Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.334508 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.347138 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4nksf"] Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.368788 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-catalog-content\") pod \"certified-operators-4nksf\" (UID: \"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c\") " pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.368955 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-utilities\") pod \"certified-operators-4nksf\" (UID: \"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c\") " pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.368993 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xn8j7\" (UniqueName: \"kubernetes.io/projected/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-kube-api-access-xn8j7\") pod \"certified-operators-4nksf\" (UID: \"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c\") " pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.471974 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-catalog-content\") pod \"certified-operators-4nksf\" (UID: \"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c\") " pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.472213 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-utilities\") pod 
\"certified-operators-4nksf\" (UID: \"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c\") " pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.472262 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xn8j7\" (UniqueName: \"kubernetes.io/projected/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-kube-api-access-xn8j7\") pod \"certified-operators-4nksf\" (UID: \"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c\") " pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.472742 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-catalog-content\") pod \"certified-operators-4nksf\" (UID: \"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c\") " pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.472777 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-utilities\") pod \"certified-operators-4nksf\" (UID: \"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c\") " pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.498634 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xn8j7\" (UniqueName: \"kubernetes.io/projected/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-kube-api-access-xn8j7\") pod \"certified-operators-4nksf\" (UID: \"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c\") " pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:25 crc kubenswrapper[4793]: I0127 22:39:25.673145 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:26 crc kubenswrapper[4793]: I0127 22:39:26.223599 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4nksf"] Jan 27 22:39:26 crc kubenswrapper[4793]: I0127 22:39:26.555034 4793 generic.go:334] "Generic (PLEG): container finished" podID="2f9e552a-cfb2-42b8-b2b5-f701fa77b17c" containerID="2e5588d59561c10e997e4a13c0c15ca7bcf1c9315253304ee6bc6fc6c37e9a1e" exitCode=0 Jan 27 22:39:26 crc kubenswrapper[4793]: I0127 22:39:26.555174 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4nksf" event={"ID":"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c","Type":"ContainerDied","Data":"2e5588d59561c10e997e4a13c0c15ca7bcf1c9315253304ee6bc6fc6c37e9a1e"} Jan 27 22:39:26 crc kubenswrapper[4793]: I0127 22:39:26.555386 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4nksf" event={"ID":"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c","Type":"ContainerStarted","Data":"792a5be957cfca619e642a3cdc1084d58ddf8e92aaab0725e7c242c3d8f98432"} Jan 27 22:39:28 crc kubenswrapper[4793]: I0127 22:39:28.576140 4793 generic.go:334] "Generic (PLEG): container finished" podID="2f9e552a-cfb2-42b8-b2b5-f701fa77b17c" containerID="a2dd128936271168312cf9435079dd647ab81091a9017c06d71061293deec38a" exitCode=0 Jan 27 22:39:28 crc kubenswrapper[4793]: I0127 22:39:28.576200 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4nksf" event={"ID":"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c","Type":"ContainerDied","Data":"a2dd128936271168312cf9435079dd647ab81091a9017c06d71061293deec38a"} Jan 27 22:39:29 crc kubenswrapper[4793]: I0127 22:39:29.588409 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4nksf" event={"ID":"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c","Type":"ContainerStarted","Data":"ce577412139e513eb59134803fb82689d5ab37ab10592c97423936113aeb6e3c"} Jan 27 22:39:29 crc kubenswrapper[4793]: I0127 22:39:29.613865 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4nksf" podStartSLOduration=2.047535666 podStartE2EDuration="4.613846487s" podCreationTimestamp="2026-01-27 22:39:25 +0000 UTC" firstStartedPulling="2026-01-27 22:39:26.557706761 +0000 UTC m=+9391.947959947" lastFinishedPulling="2026-01-27 22:39:29.124017622 +0000 UTC m=+9394.514270768" observedRunningTime="2026-01-27 22:39:29.60523564 +0000 UTC m=+9394.995488816" watchObservedRunningTime="2026-01-27 22:39:29.613846487 +0000 UTC m=+9395.004099643" Jan 27 22:39:32 crc kubenswrapper[4793]: I0127 22:39:32.803951 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:39:32 crc kubenswrapper[4793]: E0127 22:39:32.804793 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:39:35 crc kubenswrapper[4793]: I0127 22:39:35.673719 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:35 crc kubenswrapper[4793]: I0127 22:39:35.673993 4793 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:35 crc kubenswrapper[4793]: I0127 22:39:35.745848 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:36 crc kubenswrapper[4793]: I0127 22:39:36.737023 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:36 crc kubenswrapper[4793]: I0127 22:39:36.795861 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4nksf"] Jan 27 22:39:38 crc kubenswrapper[4793]: I0127 22:39:38.689952 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4nksf" podUID="2f9e552a-cfb2-42b8-b2b5-f701fa77b17c" containerName="registry-server" containerID="cri-o://ce577412139e513eb59134803fb82689d5ab37ab10592c97423936113aeb6e3c" gracePeriod=2 Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.281106 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.305060 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xn8j7\" (UniqueName: \"kubernetes.io/projected/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-kube-api-access-xn8j7\") pod \"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c\" (UID: \"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c\") " Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.305129 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-utilities\") pod \"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c\" (UID: \"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c\") " Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.305241 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-catalog-content\") pod \"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c\" (UID: \"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c\") " Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.308517 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-utilities" (OuterVolumeSpecName: "utilities") pod "2f9e552a-cfb2-42b8-b2b5-f701fa77b17c" (UID: "2f9e552a-cfb2-42b8-b2b5-f701fa77b17c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.321905 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-kube-api-access-xn8j7" (OuterVolumeSpecName: "kube-api-access-xn8j7") pod "2f9e552a-cfb2-42b8-b2b5-f701fa77b17c" (UID: "2f9e552a-cfb2-42b8-b2b5-f701fa77b17c"). InnerVolumeSpecName "kube-api-access-xn8j7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.372100 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2f9e552a-cfb2-42b8-b2b5-f701fa77b17c" (UID: "2f9e552a-cfb2-42b8-b2b5-f701fa77b17c"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.412749 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xn8j7\" (UniqueName: \"kubernetes.io/projected/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-kube-api-access-xn8j7\") on node \"crc\" DevicePath \"\"" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.412800 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.412816 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.708349 4793 generic.go:334] "Generic (PLEG): container finished" podID="2f9e552a-cfb2-42b8-b2b5-f701fa77b17c" containerID="ce577412139e513eb59134803fb82689d5ab37ab10592c97423936113aeb6e3c" exitCode=0 Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.708402 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4nksf" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.708412 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4nksf" event={"ID":"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c","Type":"ContainerDied","Data":"ce577412139e513eb59134803fb82689d5ab37ab10592c97423936113aeb6e3c"} Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.708927 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4nksf" event={"ID":"2f9e552a-cfb2-42b8-b2b5-f701fa77b17c","Type":"ContainerDied","Data":"792a5be957cfca619e642a3cdc1084d58ddf8e92aaab0725e7c242c3d8f98432"} Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.708973 4793 scope.go:117] "RemoveContainer" containerID="ce577412139e513eb59134803fb82689d5ab37ab10592c97423936113aeb6e3c" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.748053 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4nksf"] Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.757444 4793 scope.go:117] "RemoveContainer" containerID="a2dd128936271168312cf9435079dd647ab81091a9017c06d71061293deec38a" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.761438 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4nksf"] Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.785316 4793 scope.go:117] "RemoveContainer" containerID="2e5588d59561c10e997e4a13c0c15ca7bcf1c9315253304ee6bc6fc6c37e9a1e" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.818202 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f9e552a-cfb2-42b8-b2b5-f701fa77b17c" path="/var/lib/kubelet/pods/2f9e552a-cfb2-42b8-b2b5-f701fa77b17c/volumes" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.833115 4793 scope.go:117] "RemoveContainer" containerID="ce577412139e513eb59134803fb82689d5ab37ab10592c97423936113aeb6e3c" Jan 27 22:39:39 crc kubenswrapper[4793]: E0127 22:39:39.837440 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"ce577412139e513eb59134803fb82689d5ab37ab10592c97423936113aeb6e3c\": container with ID starting with ce577412139e513eb59134803fb82689d5ab37ab10592c97423936113aeb6e3c not found: ID does not exist" containerID="ce577412139e513eb59134803fb82689d5ab37ab10592c97423936113aeb6e3c" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.838055 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce577412139e513eb59134803fb82689d5ab37ab10592c97423936113aeb6e3c"} err="failed to get container status \"ce577412139e513eb59134803fb82689d5ab37ab10592c97423936113aeb6e3c\": rpc error: code = NotFound desc = could not find container \"ce577412139e513eb59134803fb82689d5ab37ab10592c97423936113aeb6e3c\": container with ID starting with ce577412139e513eb59134803fb82689d5ab37ab10592c97423936113aeb6e3c not found: ID does not exist" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.838439 4793 scope.go:117] "RemoveContainer" containerID="a2dd128936271168312cf9435079dd647ab81091a9017c06d71061293deec38a" Jan 27 22:39:39 crc kubenswrapper[4793]: E0127 22:39:39.839427 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2dd128936271168312cf9435079dd647ab81091a9017c06d71061293deec38a\": container with ID starting with a2dd128936271168312cf9435079dd647ab81091a9017c06d71061293deec38a not found: ID does not exist" containerID="a2dd128936271168312cf9435079dd647ab81091a9017c06d71061293deec38a" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.839813 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2dd128936271168312cf9435079dd647ab81091a9017c06d71061293deec38a"} err="failed to get container status \"a2dd128936271168312cf9435079dd647ab81091a9017c06d71061293deec38a\": rpc error: code = NotFound desc = could not find container \"a2dd128936271168312cf9435079dd647ab81091a9017c06d71061293deec38a\": container with ID starting with a2dd128936271168312cf9435079dd647ab81091a9017c06d71061293deec38a not found: ID does not exist" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.840044 4793 scope.go:117] "RemoveContainer" containerID="2e5588d59561c10e997e4a13c0c15ca7bcf1c9315253304ee6bc6fc6c37e9a1e" Jan 27 22:39:39 crc kubenswrapper[4793]: E0127 22:39:39.841203 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e5588d59561c10e997e4a13c0c15ca7bcf1c9315253304ee6bc6fc6c37e9a1e\": container with ID starting with 2e5588d59561c10e997e4a13c0c15ca7bcf1c9315253304ee6bc6fc6c37e9a1e not found: ID does not exist" containerID="2e5588d59561c10e997e4a13c0c15ca7bcf1c9315253304ee6bc6fc6c37e9a1e" Jan 27 22:39:39 crc kubenswrapper[4793]: I0127 22:39:39.841329 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e5588d59561c10e997e4a13c0c15ca7bcf1c9315253304ee6bc6fc6c37e9a1e"} err="failed to get container status \"2e5588d59561c10e997e4a13c0c15ca7bcf1c9315253304ee6bc6fc6c37e9a1e\": rpc error: code = NotFound desc = could not find container \"2e5588d59561c10e997e4a13c0c15ca7bcf1c9315253304ee6bc6fc6c37e9a1e\": container with ID starting with 2e5588d59561c10e997e4a13c0c15ca7bcf1c9315253304ee6bc6fc6c37e9a1e not found: ID does not exist" Jan 27 22:39:39 crc kubenswrapper[4793]: E0127 22:39:39.885271 4793 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2f9e552a_cfb2_42b8_b2b5_f701fa77b17c.slice/crio-792a5be957cfca619e642a3cdc1084d58ddf8e92aaab0725e7c242c3d8f98432\": RecentStats: unable to find data in memory cache]" Jan 27 22:39:43 crc kubenswrapper[4793]: I0127 22:39:43.804277 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:39:43 crc kubenswrapper[4793]: E0127 22:39:43.804969 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:39:57 crc kubenswrapper[4793]: I0127 22:39:57.804986 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:39:57 crc kubenswrapper[4793]: E0127 22:39:57.805976 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:40:10 crc kubenswrapper[4793]: I0127 22:40:10.804292 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:40:10 crc kubenswrapper[4793]: E0127 22:40:10.805072 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:40:22 crc kubenswrapper[4793]: I0127 22:40:22.803536 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:40:22 crc kubenswrapper[4793]: E0127 22:40:22.804327 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:40:34 crc kubenswrapper[4793]: I0127 22:40:34.803825 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:40:34 crc kubenswrapper[4793]: E0127 22:40:34.805146 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:40:49 crc kubenswrapper[4793]: I0127 22:40:49.905127 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:40:49 crc kubenswrapper[4793]: E0127 22:40:49.965102 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:41:01 crc kubenswrapper[4793]: I0127 22:41:01.804057 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:41:01 crc kubenswrapper[4793]: E0127 22:41:01.804738 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:41:12 crc kubenswrapper[4793]: I0127 22:41:12.804024 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:41:12 crc kubenswrapper[4793]: E0127 22:41:12.804993 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:41:25 crc kubenswrapper[4793]: I0127 22:41:25.813631 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:41:26 crc kubenswrapper[4793]: I0127 22:41:26.528669 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f"} Jan 27 22:41:28 crc kubenswrapper[4793]: I0127 22:41:28.242730 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:41:28 crc kubenswrapper[4793]: I0127 22:41:28.243162 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 22:41:28 crc kubenswrapper[4793]: I0127 22:41:28.277031 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0" Jan 27 22:41:28 crc kubenswrapper[4793]: I0127 22:41:28.608650 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0" Jan 27 22:41:29 crc kubenswrapper[4793]: I0127 22:41:29.578881 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f" exitCode=1 Jan 27 22:41:29 crc kubenswrapper[4793]: I0127 22:41:29.578949 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f"} Jan 27 22:41:29 crc kubenswrapper[4793]: I0127 22:41:29.579009 4793 scope.go:117] "RemoveContainer" containerID="663bcb374f814eab5fdd84fa30c873f0bfebeb96c9d697424b97309d21bb65c8" Jan 27 22:41:29 crc kubenswrapper[4793]: I0127 22:41:29.580596 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f" Jan 27 22:41:29 crc kubenswrapper[4793]: E0127 
Jan 27 22:41:30 crc kubenswrapper[4793]: I0127 22:41:30.591785 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f"
Jan 27 22:41:30 crc kubenswrapper[4793]: E0127 22:41:30.592718 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:41:33 crc kubenswrapper[4793]: I0127 22:41:33.243199 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 27 22:41:33 crc kubenswrapper[4793]: I0127 22:41:33.245272 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f"
Jan 27 22:41:33 crc kubenswrapper[4793]: E0127 22:41:33.245841 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:41:34 crc kubenswrapper[4793]: I0127 22:41:34.749156 4793 generic.go:334] "Generic (PLEG): container finished" podID="7c64ec91-a07a-470e-a490-2ad9c6a06248" containerID="fd2cf3bb7cf77a00f7145e931525a9f1cc5621f2673ecf530b5f46b0aded5fd0" exitCode=1
Jan 27 22:41:34 crc kubenswrapper[4793]: I0127 22:41:34.749345 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"7c64ec91-a07a-470e-a490-2ad9c6a06248","Type":"ContainerDied","Data":"fd2cf3bb7cf77a00f7145e931525a9f1cc5621f2673ecf530b5f46b0aded5fd0"}
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.337376 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.379464 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-ssh-key\") pod \"7c64ec91-a07a-470e-a490-2ad9c6a06248\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") "
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.389158 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/7c64ec91-a07a-470e-a490-2ad9c6a06248-test-operator-ephemeral-temporary\") pod \"7c64ec91-a07a-470e-a490-2ad9c6a06248\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") "
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.389769 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-ca-certs\") pod \"7c64ec91-a07a-470e-a490-2ad9c6a06248\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") "
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.389901 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-openstack-config-secret\") pod \"7c64ec91-a07a-470e-a490-2ad9c6a06248\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") "
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.390163 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7c64ec91-a07a-470e-a490-2ad9c6a06248-openstack-config\") pod \"7c64ec91-a07a-470e-a490-2ad9c6a06248\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") "
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.390287 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-px2q2\" (UniqueName: \"kubernetes.io/projected/7c64ec91-a07a-470e-a490-2ad9c6a06248-kube-api-access-px2q2\") pod \"7c64ec91-a07a-470e-a490-2ad9c6a06248\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") "
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.390663 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/7c64ec91-a07a-470e-a490-2ad9c6a06248-test-operator-ephemeral-workdir\") pod \"7c64ec91-a07a-470e-a490-2ad9c6a06248\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") "
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.390955 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7c64ec91-a07a-470e-a490-2ad9c6a06248-config-data\") pod \"7c64ec91-a07a-470e-a490-2ad9c6a06248\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") "
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.391263 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"7c64ec91-a07a-470e-a490-2ad9c6a06248\" (UID: \"7c64ec91-a07a-470e-a490-2ad9c6a06248\") "
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.675569 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c64ec91-a07a-470e-a490-2ad9c6a06248-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "7c64ec91-a07a-470e-a490-2ad9c6a06248" (UID: "7c64ec91-a07a-470e-a490-2ad9c6a06248"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.678222 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c64ec91-a07a-470e-a490-2ad9c6a06248-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "7c64ec91-a07a-470e-a490-2ad9c6a06248" (UID: "7c64ec91-a07a-470e-a490-2ad9c6a06248"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.680138 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c64ec91-a07a-470e-a490-2ad9c6a06248-kube-api-access-px2q2" (OuterVolumeSpecName: "kube-api-access-px2q2") pod "7c64ec91-a07a-470e-a490-2ad9c6a06248" (UID: "7c64ec91-a07a-470e-a490-2ad9c6a06248"). InnerVolumeSpecName "kube-api-access-px2q2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.684330 4793 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/7c64ec91-a07a-470e-a490-2ad9c6a06248-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.684482 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-px2q2\" (UniqueName: \"kubernetes.io/projected/7c64ec91-a07a-470e-a490-2ad9c6a06248-kube-api-access-px2q2\") on node \"crc\" DevicePath \"\""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.684619 4793 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/7c64ec91-a07a-470e-a490-2ad9c6a06248-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.681147 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c64ec91-a07a-470e-a490-2ad9c6a06248-config-data" (OuterVolumeSpecName: "config-data") pod "7c64ec91-a07a-470e-a490-2ad9c6a06248" (UID: "7c64ec91-a07a-470e-a490-2ad9c6a06248"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.687113 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "test-operator-logs") pod "7c64ec91-a07a-470e-a490-2ad9c6a06248" (UID: "7c64ec91-a07a-470e-a490-2ad9c6a06248"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.688726 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7c64ec91-a07a-470e-a490-2ad9c6a06248" (UID: "7c64ec91-a07a-470e-a490-2ad9c6a06248"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.734795 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "7c64ec91-a07a-470e-a490-2ad9c6a06248" (UID: "7c64ec91-a07a-470e-a490-2ad9c6a06248"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.741916 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "7c64ec91-a07a-470e-a490-2ad9c6a06248" (UID: "7c64ec91-a07a-470e-a490-2ad9c6a06248"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.755203 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c64ec91-a07a-470e-a490-2ad9c6a06248-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "7c64ec91-a07a-470e-a490-2ad9c6a06248" (UID: "7c64ec91-a07a-470e-a490-2ad9c6a06248"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.785959 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"7c64ec91-a07a-470e-a490-2ad9c6a06248","Type":"ContainerDied","Data":"632d895459df67cd4eab9e1566fefc9d12b6a4225edbca482b10cb2ef8c0249c"}
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.786031 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="632d895459df67cd4eab9e1566fefc9d12b6a4225edbca482b10cb2ef8c0249c"
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.786099 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.789676 4793 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7c64ec91-a07a-470e-a490-2ad9c6a06248-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.789741 4793 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" "
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.789752 4793 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-ssh-key\") on node \"crc\" DevicePath \"\""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.789761 4793 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-ca-certs\") on node \"crc\" DevicePath \"\""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.789771 4793 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7c64ec91-a07a-470e-a490-2ad9c6a06248-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.789781 4793 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7c64ec91-a07a-470e-a490-2ad9c6a06248-openstack-config\") on node \"crc\" DevicePath \"\""
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.812038 4793 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc"
Jan 27 22:41:36 crc kubenswrapper[4793]: I0127 22:41:36.891126 4793 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\""
Jan 27 22:41:38 crc kubenswrapper[4793]: I0127 22:41:38.242698 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 22:41:38 crc kubenswrapper[4793]: I0127 22:41:38.243574 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 27 22:41:38 crc kubenswrapper[4793]: I0127 22:41:38.245065 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f"
Jan 27 22:41:38 crc kubenswrapper[4793]: E0127 22:41:38.245672 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.254871 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Jan 27 22:41:39 crc kubenswrapper[4793]: E0127 22:41:39.256097 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c64ec91-a07a-470e-a490-2ad9c6a06248" containerName="tempest-tests-tempest-tests-runner"
Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.256145 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c64ec91-a07a-470e-a490-2ad9c6a06248" containerName="tempest-tests-tempest-tests-runner"
CPUSet assignment" podUID="7c64ec91-a07a-470e-a490-2ad9c6a06248" containerName="tempest-tests-tempest-tests-runner" Jan 27 22:41:39 crc kubenswrapper[4793]: E0127 22:41:39.256178 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f9e552a-cfb2-42b8-b2b5-f701fa77b17c" containerName="registry-server" Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.256194 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f9e552a-cfb2-42b8-b2b5-f701fa77b17c" containerName="registry-server" Jan 27 22:41:39 crc kubenswrapper[4793]: E0127 22:41:39.256224 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f9e552a-cfb2-42b8-b2b5-f701fa77b17c" containerName="extract-content" Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.256243 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f9e552a-cfb2-42b8-b2b5-f701fa77b17c" containerName="extract-content" Jan 27 22:41:39 crc kubenswrapper[4793]: E0127 22:41:39.256283 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f9e552a-cfb2-42b8-b2b5-f701fa77b17c" containerName="extract-utilities" Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.256440 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f9e552a-cfb2-42b8-b2b5-f701fa77b17c" containerName="extract-utilities" Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.256955 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c64ec91-a07a-470e-a490-2ad9c6a06248" containerName="tempest-tests-tempest-tests-runner" Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.257040 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f9e552a-cfb2-42b8-b2b5-f701fa77b17c" containerName="registry-server" Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.258755 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.263671 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-ltsfq" Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.274975 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.362797 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"8b5d6801-ee34-4795-926a-d9ec345d7eae\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.363066 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rgpx\" (UniqueName: \"kubernetes.io/projected/8b5d6801-ee34-4795-926a-d9ec345d7eae-kube-api-access-4rgpx\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"8b5d6801-ee34-4795-926a-d9ec345d7eae\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.465798 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"8b5d6801-ee34-4795-926a-d9ec345d7eae\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.466238 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rgpx\" (UniqueName: \"kubernetes.io/projected/8b5d6801-ee34-4795-926a-d9ec345d7eae-kube-api-access-4rgpx\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"8b5d6801-ee34-4795-926a-d9ec345d7eae\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.466475 4793 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"8b5d6801-ee34-4795-926a-d9ec345d7eae\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.492701 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rgpx\" (UniqueName: \"kubernetes.io/projected/8b5d6801-ee34-4795-926a-d9ec345d7eae-kube-api-access-4rgpx\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"8b5d6801-ee34-4795-926a-d9ec345d7eae\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 27 22:41:39 crc kubenswrapper[4793]: I0127 22:41:39.509646 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"8b5d6801-ee34-4795-926a-d9ec345d7eae\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 27 22:41:39 crc 
kubenswrapper[4793]: I0127 22:41:39.596648 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 27 22:41:40 crc kubenswrapper[4793]: I0127 22:41:40.372126 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 27 22:41:40 crc kubenswrapper[4793]: W0127 22:41:40.382183 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8b5d6801_ee34_4795_926a_d9ec345d7eae.slice/crio-52dd11946c008a281e0c1608368210f4cc3b133b2c59a73a675f6b469a520021 WatchSource:0}: Error finding container 52dd11946c008a281e0c1608368210f4cc3b133b2c59a73a675f6b469a520021: Status 404 returned error can't find the container with id 52dd11946c008a281e0c1608368210f4cc3b133b2c59a73a675f6b469a520021 Jan 27 22:41:40 crc kubenswrapper[4793]: I0127 22:41:40.969761 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"8b5d6801-ee34-4795-926a-d9ec345d7eae","Type":"ContainerStarted","Data":"52dd11946c008a281e0c1608368210f4cc3b133b2c59a73a675f6b469a520021"} Jan 27 22:41:41 crc kubenswrapper[4793]: I0127 22:41:41.983884 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"8b5d6801-ee34-4795-926a-d9ec345d7eae","Type":"ContainerStarted","Data":"ced73cbac212b4d00018c7d1405b60cfcbdf0aa4dcfc7e51a3e6325ca57d31c6"} Jan 27 22:41:42 crc kubenswrapper[4793]: I0127 22:41:42.029396 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=1.928879765 podStartE2EDuration="3.029368305s" podCreationTimestamp="2026-01-27 22:41:39 +0000 UTC" firstStartedPulling="2026-01-27 22:41:40.386424852 +0000 UTC m=+9525.776678018" lastFinishedPulling="2026-01-27 22:41:41.486913362 +0000 UTC m=+9526.877166558" observedRunningTime="2026-01-27 22:41:42.002849687 +0000 UTC m=+9527.393102883" watchObservedRunningTime="2026-01-27 22:41:42.029368305 +0000 UTC m=+9527.419621501" Jan 27 22:41:48 crc kubenswrapper[4793]: I0127 22:41:48.803432 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f" Jan 27 22:41:48 crc kubenswrapper[4793]: E0127 22:41:48.805581 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:41:52 crc kubenswrapper[4793]: I0127 22:41:52.753632 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:41:52 crc kubenswrapper[4793]: I0127 22:41:52.754127 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: 
connect: connection refused" Jan 27 22:42:03 crc kubenswrapper[4793]: I0127 22:42:03.806238 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f" Jan 27 22:42:03 crc kubenswrapper[4793]: E0127 22:42:03.807724 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:42:13 crc kubenswrapper[4793]: I0127 22:42:13.470460 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-s64mx/must-gather-bsnf7"] Jan 27 22:42:13 crc kubenswrapper[4793]: I0127 22:42:13.472633 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-s64mx/must-gather-bsnf7" Jan 27 22:42:13 crc kubenswrapper[4793]: I0127 22:42:13.475288 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-s64mx"/"default-dockercfg-7zswl" Jan 27 22:42:13 crc kubenswrapper[4793]: I0127 22:42:13.475410 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-s64mx"/"openshift-service-ca.crt" Jan 27 22:42:13 crc kubenswrapper[4793]: I0127 22:42:13.476269 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-s64mx"/"kube-root-ca.crt" Jan 27 22:42:13 crc kubenswrapper[4793]: I0127 22:42:13.483370 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-s64mx/must-gather-bsnf7"] Jan 27 22:42:13 crc kubenswrapper[4793]: I0127 22:42:13.619453 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b8f70002-f5f1-4e33-99ec-385dcde56935-must-gather-output\") pod \"must-gather-bsnf7\" (UID: \"b8f70002-f5f1-4e33-99ec-385dcde56935\") " pod="openshift-must-gather-s64mx/must-gather-bsnf7" Jan 27 22:42:13 crc kubenswrapper[4793]: I0127 22:42:13.619885 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cplnr\" (UniqueName: \"kubernetes.io/projected/b8f70002-f5f1-4e33-99ec-385dcde56935-kube-api-access-cplnr\") pod \"must-gather-bsnf7\" (UID: \"b8f70002-f5f1-4e33-99ec-385dcde56935\") " pod="openshift-must-gather-s64mx/must-gather-bsnf7" Jan 27 22:42:13 crc kubenswrapper[4793]: I0127 22:42:13.722015 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b8f70002-f5f1-4e33-99ec-385dcde56935-must-gather-output\") pod \"must-gather-bsnf7\" (UID: \"b8f70002-f5f1-4e33-99ec-385dcde56935\") " pod="openshift-must-gather-s64mx/must-gather-bsnf7" Jan 27 22:42:13 crc kubenswrapper[4793]: I0127 22:42:13.722074 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cplnr\" (UniqueName: \"kubernetes.io/projected/b8f70002-f5f1-4e33-99ec-385dcde56935-kube-api-access-cplnr\") pod \"must-gather-bsnf7\" (UID: \"b8f70002-f5f1-4e33-99ec-385dcde56935\") " pod="openshift-must-gather-s64mx/must-gather-bsnf7" Jan 27 22:42:13 crc kubenswrapper[4793]: I0127 22:42:13.722871 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: 
\"kubernetes.io/empty-dir/b8f70002-f5f1-4e33-99ec-385dcde56935-must-gather-output\") pod \"must-gather-bsnf7\" (UID: \"b8f70002-f5f1-4e33-99ec-385dcde56935\") " pod="openshift-must-gather-s64mx/must-gather-bsnf7" Jan 27 22:42:13 crc kubenswrapper[4793]: I0127 22:42:13.750439 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cplnr\" (UniqueName: \"kubernetes.io/projected/b8f70002-f5f1-4e33-99ec-385dcde56935-kube-api-access-cplnr\") pod \"must-gather-bsnf7\" (UID: \"b8f70002-f5f1-4e33-99ec-385dcde56935\") " pod="openshift-must-gather-s64mx/must-gather-bsnf7" Jan 27 22:42:13 crc kubenswrapper[4793]: I0127 22:42:13.826966 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-s64mx/must-gather-bsnf7" Jan 27 22:42:14 crc kubenswrapper[4793]: I0127 22:42:14.318729 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-s64mx/must-gather-bsnf7"] Jan 27 22:42:14 crc kubenswrapper[4793]: I0127 22:42:14.414896 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-s64mx/must-gather-bsnf7" event={"ID":"b8f70002-f5f1-4e33-99ec-385dcde56935","Type":"ContainerStarted","Data":"ee5dfc4c99928944e3cfa42b3d91db3f034460b9206fc9edace1da4e3e39ea0e"} Jan 27 22:42:18 crc kubenswrapper[4793]: I0127 22:42:18.804348 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f" Jan 27 22:42:18 crc kubenswrapper[4793]: E0127 22:42:18.805701 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:42:22 crc kubenswrapper[4793]: I0127 22:42:22.525809 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-s64mx/must-gather-bsnf7" event={"ID":"b8f70002-f5f1-4e33-99ec-385dcde56935","Type":"ContainerStarted","Data":"fe7b4fda3fa02a8b22b2e634afb961df5ef167ab15e3cf63352c3712affc1c2e"} Jan 27 22:42:22 crc kubenswrapper[4793]: I0127 22:42:22.526277 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-s64mx/must-gather-bsnf7" event={"ID":"b8f70002-f5f1-4e33-99ec-385dcde56935","Type":"ContainerStarted","Data":"59d4a95a174bc168546cae3d2f99065a85ea8530df7b05d7ad6a5561f4dc4841"} Jan 27 22:42:22 crc kubenswrapper[4793]: I0127 22:42:22.563472 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-s64mx/must-gather-bsnf7" podStartSLOduration=2.392019158 podStartE2EDuration="9.563441113s" podCreationTimestamp="2026-01-27 22:42:13 +0000 UTC" firstStartedPulling="2026-01-27 22:42:14.332576543 +0000 UTC m=+9559.722829699" lastFinishedPulling="2026-01-27 22:42:21.503998498 +0000 UTC m=+9566.894251654" observedRunningTime="2026-01-27 22:42:22.548094103 +0000 UTC m=+9567.938347289" watchObservedRunningTime="2026-01-27 22:42:22.563441113 +0000 UTC m=+9567.953694299" Jan 27 22:42:22 crc kubenswrapper[4793]: I0127 22:42:22.753840 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:42:22 crc kubenswrapper[4793]: 
I0127 22:42:22.753943 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:42:26 crc kubenswrapper[4793]: I0127 22:42:26.876296 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-s64mx/crc-debug-cx9k8"] Jan 27 22:42:26 crc kubenswrapper[4793]: I0127 22:42:26.878659 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-s64mx/crc-debug-cx9k8" Jan 27 22:42:27 crc kubenswrapper[4793]: I0127 22:42:27.023609 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xv9hl\" (UniqueName: \"kubernetes.io/projected/8dd94a4f-4d97-4268-9564-c7fea736791d-kube-api-access-xv9hl\") pod \"crc-debug-cx9k8\" (UID: \"8dd94a4f-4d97-4268-9564-c7fea736791d\") " pod="openshift-must-gather-s64mx/crc-debug-cx9k8" Jan 27 22:42:27 crc kubenswrapper[4793]: I0127 22:42:27.023822 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8dd94a4f-4d97-4268-9564-c7fea736791d-host\") pod \"crc-debug-cx9k8\" (UID: \"8dd94a4f-4d97-4268-9564-c7fea736791d\") " pod="openshift-must-gather-s64mx/crc-debug-cx9k8" Jan 27 22:42:27 crc kubenswrapper[4793]: I0127 22:42:27.125971 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8dd94a4f-4d97-4268-9564-c7fea736791d-host\") pod \"crc-debug-cx9k8\" (UID: \"8dd94a4f-4d97-4268-9564-c7fea736791d\") " pod="openshift-must-gather-s64mx/crc-debug-cx9k8" Jan 27 22:42:27 crc kubenswrapper[4793]: I0127 22:42:27.126121 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8dd94a4f-4d97-4268-9564-c7fea736791d-host\") pod \"crc-debug-cx9k8\" (UID: \"8dd94a4f-4d97-4268-9564-c7fea736791d\") " pod="openshift-must-gather-s64mx/crc-debug-cx9k8" Jan 27 22:42:27 crc kubenswrapper[4793]: I0127 22:42:27.126176 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xv9hl\" (UniqueName: \"kubernetes.io/projected/8dd94a4f-4d97-4268-9564-c7fea736791d-kube-api-access-xv9hl\") pod \"crc-debug-cx9k8\" (UID: \"8dd94a4f-4d97-4268-9564-c7fea736791d\") " pod="openshift-must-gather-s64mx/crc-debug-cx9k8" Jan 27 22:42:27 crc kubenswrapper[4793]: I0127 22:42:27.150952 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xv9hl\" (UniqueName: \"kubernetes.io/projected/8dd94a4f-4d97-4268-9564-c7fea736791d-kube-api-access-xv9hl\") pod \"crc-debug-cx9k8\" (UID: \"8dd94a4f-4d97-4268-9564-c7fea736791d\") " pod="openshift-must-gather-s64mx/crc-debug-cx9k8" Jan 27 22:42:27 crc kubenswrapper[4793]: I0127 22:42:27.195966 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-s64mx/crc-debug-cx9k8" Jan 27 22:42:27 crc kubenswrapper[4793]: I0127 22:42:27.612142 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-s64mx/crc-debug-cx9k8" event={"ID":"8dd94a4f-4d97-4268-9564-c7fea736791d","Type":"ContainerStarted","Data":"86ed434cb6ad34cbcdb4725a35bdb15ba01e731b698c2008cbcee9f43bd63385"} Jan 27 22:42:30 crc kubenswrapper[4793]: I0127 22:42:30.802921 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f" Jan 27 22:42:30 crc kubenswrapper[4793]: E0127 22:42:30.804661 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:42:39 crc kubenswrapper[4793]: I0127 22:42:39.850786 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-s64mx/crc-debug-cx9k8" event={"ID":"8dd94a4f-4d97-4268-9564-c7fea736791d","Type":"ContainerStarted","Data":"d902aeed4c051abacbb2ae03a374b91aa497243fc85f7393148e0022f704bd54"} Jan 27 22:42:39 crc kubenswrapper[4793]: I0127 22:42:39.877168 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-s64mx/crc-debug-cx9k8" podStartSLOduration=2.345381885 podStartE2EDuration="13.877137239s" podCreationTimestamp="2026-01-27 22:42:26 +0000 UTC" firstStartedPulling="2026-01-27 22:42:27.247833486 +0000 UTC m=+9572.638086642" lastFinishedPulling="2026-01-27 22:42:38.77958885 +0000 UTC m=+9584.169841996" observedRunningTime="2026-01-27 22:42:39.864387383 +0000 UTC m=+9585.254640569" watchObservedRunningTime="2026-01-27 22:42:39.877137239 +0000 UTC m=+9585.267390435" Jan 27 22:42:42 crc kubenswrapper[4793]: I0127 22:42:42.804598 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f" Jan 27 22:42:42 crc kubenswrapper[4793]: E0127 22:42:42.805455 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:42:52 crc kubenswrapper[4793]: I0127 22:42:52.754231 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:42:52 crc kubenswrapper[4793]: I0127 22:42:52.756973 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:42:52 crc kubenswrapper[4793]: I0127 22:42:52.757358 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" Jan 27 22:42:52 crc kubenswrapper[4793]: I0127 22:42:52.759206 4793 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 22:42:52 crc kubenswrapper[4793]: I0127 22:42:52.759751 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027" gracePeriod=600 Jan 27 22:42:52 crc kubenswrapper[4793]: E0127 22:42:52.893885 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:42:53 crc kubenswrapper[4793]: I0127 22:42:53.048331 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027" exitCode=0 Jan 27 22:42:53 crc kubenswrapper[4793]: I0127 22:42:53.048445 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027"} Jan 27 22:42:53 crc kubenswrapper[4793]: I0127 22:42:53.048703 4793 scope.go:117] "RemoveContainer" containerID="9b3b13faabad6d944a8ad6c69d8f90df09b3942d8c601fe26f08eb8bbc2c79d7" Jan 27 22:42:53 crc kubenswrapper[4793]: I0127 22:42:53.049504 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027" Jan 27 22:42:53 crc kubenswrapper[4793]: E0127 22:42:53.049932 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:42:53 crc kubenswrapper[4793]: I0127 22:42:53.807056 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f" Jan 27 22:42:53 crc kubenswrapper[4793]: E0127 22:42:53.808144 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:43:06 crc kubenswrapper[4793]: I0127 22:43:06.805233 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027" Jan 27 22:43:06 crc kubenswrapper[4793]: E0127 22:43:06.805907 4793 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:43:08 crc kubenswrapper[4793]: I0127 22:43:08.803479 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f" Jan 27 22:43:08 crc kubenswrapper[4793]: E0127 22:43:08.804200 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:43:20 crc kubenswrapper[4793]: I0127 22:43:20.803862 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027" Jan 27 22:43:20 crc kubenswrapper[4793]: E0127 22:43:20.804487 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:43:23 crc kubenswrapper[4793]: I0127 22:43:23.803737 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f" Jan 27 22:43:23 crc kubenswrapper[4793]: E0127 22:43:23.804847 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:43:25 crc kubenswrapper[4793]: I0127 22:43:25.543620 4793 generic.go:334] "Generic (PLEG): container finished" podID="8dd94a4f-4d97-4268-9564-c7fea736791d" containerID="d902aeed4c051abacbb2ae03a374b91aa497243fc85f7393148e0022f704bd54" exitCode=0 Jan 27 22:43:25 crc kubenswrapper[4793]: I0127 22:43:25.543699 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-s64mx/crc-debug-cx9k8" event={"ID":"8dd94a4f-4d97-4268-9564-c7fea736791d","Type":"ContainerDied","Data":"d902aeed4c051abacbb2ae03a374b91aa497243fc85f7393148e0022f704bd54"} Jan 27 22:43:26 crc kubenswrapper[4793]: I0127 22:43:26.679443 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-s64mx/crc-debug-cx9k8" Jan 27 22:43:26 crc kubenswrapper[4793]: I0127 22:43:26.726085 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-s64mx/crc-debug-cx9k8"] Jan 27 22:43:26 crc kubenswrapper[4793]: I0127 22:43:26.736470 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-s64mx/crc-debug-cx9k8"] Jan 27 22:43:26 crc kubenswrapper[4793]: I0127 22:43:26.872765 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8dd94a4f-4d97-4268-9564-c7fea736791d-host\") pod \"8dd94a4f-4d97-4268-9564-c7fea736791d\" (UID: \"8dd94a4f-4d97-4268-9564-c7fea736791d\") " Jan 27 22:43:26 crc kubenswrapper[4793]: I0127 22:43:26.872813 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xv9hl\" (UniqueName: \"kubernetes.io/projected/8dd94a4f-4d97-4268-9564-c7fea736791d-kube-api-access-xv9hl\") pod \"8dd94a4f-4d97-4268-9564-c7fea736791d\" (UID: \"8dd94a4f-4d97-4268-9564-c7fea736791d\") " Jan 27 22:43:26 crc kubenswrapper[4793]: I0127 22:43:26.872872 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8dd94a4f-4d97-4268-9564-c7fea736791d-host" (OuterVolumeSpecName: "host") pod "8dd94a4f-4d97-4268-9564-c7fea736791d" (UID: "8dd94a4f-4d97-4268-9564-c7fea736791d"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 22:43:26 crc kubenswrapper[4793]: I0127 22:43:26.873347 4793 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8dd94a4f-4d97-4268-9564-c7fea736791d-host\") on node \"crc\" DevicePath \"\"" Jan 27 22:43:26 crc kubenswrapper[4793]: I0127 22:43:26.889862 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8dd94a4f-4d97-4268-9564-c7fea736791d-kube-api-access-xv9hl" (OuterVolumeSpecName: "kube-api-access-xv9hl") pod "8dd94a4f-4d97-4268-9564-c7fea736791d" (UID: "8dd94a4f-4d97-4268-9564-c7fea736791d"). InnerVolumeSpecName "kube-api-access-xv9hl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:43:26 crc kubenswrapper[4793]: I0127 22:43:26.975720 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xv9hl\" (UniqueName: \"kubernetes.io/projected/8dd94a4f-4d97-4268-9564-c7fea736791d-kube-api-access-xv9hl\") on node \"crc\" DevicePath \"\"" Jan 27 22:43:27 crc kubenswrapper[4793]: I0127 22:43:27.563452 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="86ed434cb6ad34cbcdb4725a35bdb15ba01e731b698c2008cbcee9f43bd63385" Jan 27 22:43:27 crc kubenswrapper[4793]: I0127 22:43:27.563744 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-s64mx/crc-debug-cx9k8" Jan 27 22:43:27 crc kubenswrapper[4793]: I0127 22:43:27.815628 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8dd94a4f-4d97-4268-9564-c7fea736791d" path="/var/lib/kubelet/pods/8dd94a4f-4d97-4268-9564-c7fea736791d/volumes" Jan 27 22:43:27 crc kubenswrapper[4793]: I0127 22:43:27.973696 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-s64mx/crc-debug-5z8bk"] Jan 27 22:43:27 crc kubenswrapper[4793]: E0127 22:43:27.974239 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dd94a4f-4d97-4268-9564-c7fea736791d" containerName="container-00" Jan 27 22:43:27 crc kubenswrapper[4793]: I0127 22:43:27.974271 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dd94a4f-4d97-4268-9564-c7fea736791d" containerName="container-00" Jan 27 22:43:27 crc kubenswrapper[4793]: I0127 22:43:27.974515 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="8dd94a4f-4d97-4268-9564-c7fea736791d" containerName="container-00" Jan 27 22:43:27 crc kubenswrapper[4793]: I0127 22:43:27.975340 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-s64mx/crc-debug-5z8bk" Jan 27 22:43:28 crc kubenswrapper[4793]: I0127 22:43:28.099306 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a3e32d48-0574-4ea8-bfc5-efdd127637de-host\") pod \"crc-debug-5z8bk\" (UID: \"a3e32d48-0574-4ea8-bfc5-efdd127637de\") " pod="openshift-must-gather-s64mx/crc-debug-5z8bk" Jan 27 22:43:28 crc kubenswrapper[4793]: I0127 22:43:28.100580 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fm92w\" (UniqueName: \"kubernetes.io/projected/a3e32d48-0574-4ea8-bfc5-efdd127637de-kube-api-access-fm92w\") pod \"crc-debug-5z8bk\" (UID: \"a3e32d48-0574-4ea8-bfc5-efdd127637de\") " pod="openshift-must-gather-s64mx/crc-debug-5z8bk" Jan 27 22:43:28 crc kubenswrapper[4793]: I0127 22:43:28.203348 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a3e32d48-0574-4ea8-bfc5-efdd127637de-host\") pod \"crc-debug-5z8bk\" (UID: \"a3e32d48-0574-4ea8-bfc5-efdd127637de\") " pod="openshift-must-gather-s64mx/crc-debug-5z8bk" Jan 27 22:43:28 crc kubenswrapper[4793]: I0127 22:43:28.203728 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fm92w\" (UniqueName: \"kubernetes.io/projected/a3e32d48-0574-4ea8-bfc5-efdd127637de-kube-api-access-fm92w\") pod \"crc-debug-5z8bk\" (UID: \"a3e32d48-0574-4ea8-bfc5-efdd127637de\") " pod="openshift-must-gather-s64mx/crc-debug-5z8bk" Jan 27 22:43:28 crc kubenswrapper[4793]: I0127 22:43:28.203473 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a3e32d48-0574-4ea8-bfc5-efdd127637de-host\") pod \"crc-debug-5z8bk\" (UID: \"a3e32d48-0574-4ea8-bfc5-efdd127637de\") " pod="openshift-must-gather-s64mx/crc-debug-5z8bk" Jan 27 22:43:28 crc kubenswrapper[4793]: I0127 22:43:28.232357 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fm92w\" (UniqueName: \"kubernetes.io/projected/a3e32d48-0574-4ea8-bfc5-efdd127637de-kube-api-access-fm92w\") pod \"crc-debug-5z8bk\" (UID: \"a3e32d48-0574-4ea8-bfc5-efdd127637de\") " 
pod="openshift-must-gather-s64mx/crc-debug-5z8bk" Jan 27 22:43:28 crc kubenswrapper[4793]: I0127 22:43:28.295433 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-s64mx/crc-debug-5z8bk" Jan 27 22:43:28 crc kubenswrapper[4793]: I0127 22:43:28.572327 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-s64mx/crc-debug-5z8bk" event={"ID":"a3e32d48-0574-4ea8-bfc5-efdd127637de","Type":"ContainerStarted","Data":"9b702a70c1c5556909487787e85e331dab2c62208f31b695752cc23c621ea440"} Jan 27 22:43:29 crc kubenswrapper[4793]: I0127 22:43:29.581848 4793 generic.go:334] "Generic (PLEG): container finished" podID="a3e32d48-0574-4ea8-bfc5-efdd127637de" containerID="822d1de0d05633f587fb5775856608b265ebe3856f0f9e876e68383dbf58f542" exitCode=0 Jan 27 22:43:29 crc kubenswrapper[4793]: I0127 22:43:29.581900 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-s64mx/crc-debug-5z8bk" event={"ID":"a3e32d48-0574-4ea8-bfc5-efdd127637de","Type":"ContainerDied","Data":"822d1de0d05633f587fb5775856608b265ebe3856f0f9e876e68383dbf58f542"} Jan 27 22:43:30 crc kubenswrapper[4793]: I0127 22:43:30.702286 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-s64mx/crc-debug-5z8bk" Jan 27 22:43:30 crc kubenswrapper[4793]: I0127 22:43:30.754519 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a3e32d48-0574-4ea8-bfc5-efdd127637de-host\") pod \"a3e32d48-0574-4ea8-bfc5-efdd127637de\" (UID: \"a3e32d48-0574-4ea8-bfc5-efdd127637de\") " Jan 27 22:43:30 crc kubenswrapper[4793]: I0127 22:43:30.754603 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fm92w\" (UniqueName: \"kubernetes.io/projected/a3e32d48-0574-4ea8-bfc5-efdd127637de-kube-api-access-fm92w\") pod \"a3e32d48-0574-4ea8-bfc5-efdd127637de\" (UID: \"a3e32d48-0574-4ea8-bfc5-efdd127637de\") " Jan 27 22:43:30 crc kubenswrapper[4793]: I0127 22:43:30.754660 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a3e32d48-0574-4ea8-bfc5-efdd127637de-host" (OuterVolumeSpecName: "host") pod "a3e32d48-0574-4ea8-bfc5-efdd127637de" (UID: "a3e32d48-0574-4ea8-bfc5-efdd127637de"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 22:43:30 crc kubenswrapper[4793]: I0127 22:43:30.755140 4793 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a3e32d48-0574-4ea8-bfc5-efdd127637de-host\") on node \"crc\" DevicePath \"\"" Jan 27 22:43:30 crc kubenswrapper[4793]: I0127 22:43:30.776785 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3e32d48-0574-4ea8-bfc5-efdd127637de-kube-api-access-fm92w" (OuterVolumeSpecName: "kube-api-access-fm92w") pod "a3e32d48-0574-4ea8-bfc5-efdd127637de" (UID: "a3e32d48-0574-4ea8-bfc5-efdd127637de"). InnerVolumeSpecName "kube-api-access-fm92w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:43:30 crc kubenswrapper[4793]: I0127 22:43:30.862974 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fm92w\" (UniqueName: \"kubernetes.io/projected/a3e32d48-0574-4ea8-bfc5-efdd127637de-kube-api-access-fm92w\") on node \"crc\" DevicePath \"\"" Jan 27 22:43:31 crc kubenswrapper[4793]: I0127 22:43:31.034942 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-s64mx/crc-debug-5z8bk"] Jan 27 22:43:31 crc kubenswrapper[4793]: I0127 22:43:31.046340 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-s64mx/crc-debug-5z8bk"] Jan 27 22:43:31 crc kubenswrapper[4793]: I0127 22:43:31.606349 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9b702a70c1c5556909487787e85e331dab2c62208f31b695752cc23c621ea440" Jan 27 22:43:31 crc kubenswrapper[4793]: I0127 22:43:31.606415 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-s64mx/crc-debug-5z8bk" Jan 27 22:43:31 crc kubenswrapper[4793]: I0127 22:43:31.828468 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3e32d48-0574-4ea8-bfc5-efdd127637de" path="/var/lib/kubelet/pods/a3e32d48-0574-4ea8-bfc5-efdd127637de/volumes" Jan 27 22:43:32 crc kubenswrapper[4793]: I0127 22:43:32.281055 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-s64mx/crc-debug-6bg2z"] Jan 27 22:43:32 crc kubenswrapper[4793]: E0127 22:43:32.281810 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3e32d48-0574-4ea8-bfc5-efdd127637de" containerName="container-00" Jan 27 22:43:32 crc kubenswrapper[4793]: I0127 22:43:32.281840 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3e32d48-0574-4ea8-bfc5-efdd127637de" containerName="container-00" Jan 27 22:43:32 crc kubenswrapper[4793]: I0127 22:43:32.282294 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3e32d48-0574-4ea8-bfc5-efdd127637de" containerName="container-00" Jan 27 22:43:32 crc kubenswrapper[4793]: I0127 22:43:32.283624 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-s64mx/crc-debug-6bg2z" Jan 27 22:43:32 crc kubenswrapper[4793]: I0127 22:43:32.296507 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqqfj\" (UniqueName: \"kubernetes.io/projected/2c282b97-703d-44ba-abda-5f000be24afc-kube-api-access-pqqfj\") pod \"crc-debug-6bg2z\" (UID: \"2c282b97-703d-44ba-abda-5f000be24afc\") " pod="openshift-must-gather-s64mx/crc-debug-6bg2z" Jan 27 22:43:32 crc kubenswrapper[4793]: I0127 22:43:32.296960 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2c282b97-703d-44ba-abda-5f000be24afc-host\") pod \"crc-debug-6bg2z\" (UID: \"2c282b97-703d-44ba-abda-5f000be24afc\") " pod="openshift-must-gather-s64mx/crc-debug-6bg2z" Jan 27 22:43:32 crc kubenswrapper[4793]: I0127 22:43:32.400518 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqqfj\" (UniqueName: \"kubernetes.io/projected/2c282b97-703d-44ba-abda-5f000be24afc-kube-api-access-pqqfj\") pod \"crc-debug-6bg2z\" (UID: \"2c282b97-703d-44ba-abda-5f000be24afc\") " pod="openshift-must-gather-s64mx/crc-debug-6bg2z" Jan 27 22:43:32 crc kubenswrapper[4793]: I0127 22:43:32.401117 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2c282b97-703d-44ba-abda-5f000be24afc-host\") pod \"crc-debug-6bg2z\" (UID: \"2c282b97-703d-44ba-abda-5f000be24afc\") " pod="openshift-must-gather-s64mx/crc-debug-6bg2z" Jan 27 22:43:32 crc kubenswrapper[4793]: I0127 22:43:32.401278 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2c282b97-703d-44ba-abda-5f000be24afc-host\") pod \"crc-debug-6bg2z\" (UID: \"2c282b97-703d-44ba-abda-5f000be24afc\") " pod="openshift-must-gather-s64mx/crc-debug-6bg2z" Jan 27 22:43:32 crc kubenswrapper[4793]: I0127 22:43:32.437846 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqqfj\" (UniqueName: \"kubernetes.io/projected/2c282b97-703d-44ba-abda-5f000be24afc-kube-api-access-pqqfj\") pod \"crc-debug-6bg2z\" (UID: \"2c282b97-703d-44ba-abda-5f000be24afc\") " pod="openshift-must-gather-s64mx/crc-debug-6bg2z" Jan 27 22:43:32 crc kubenswrapper[4793]: I0127 22:43:32.614148 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-s64mx/crc-debug-6bg2z" Jan 27 22:43:32 crc kubenswrapper[4793]: W0127 22:43:32.678162 4793 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c282b97_703d_44ba_abda_5f000be24afc.slice/crio-b45b2669cc814cc7716e12f55359fec153b70fa3f735bf83e3789640617671fe WatchSource:0}: Error finding container b45b2669cc814cc7716e12f55359fec153b70fa3f735bf83e3789640617671fe: Status 404 returned error can't find the container with id b45b2669cc814cc7716e12f55359fec153b70fa3f735bf83e3789640617671fe Jan 27 22:43:33 crc kubenswrapper[4793]: I0127 22:43:33.630934 4793 generic.go:334] "Generic (PLEG): container finished" podID="2c282b97-703d-44ba-abda-5f000be24afc" containerID="8125578267d2b87dbb1607bddca96ba526c08ff2f7ba11556e4705a611c476ce" exitCode=0 Jan 27 22:43:33 crc kubenswrapper[4793]: I0127 22:43:33.631099 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-s64mx/crc-debug-6bg2z" event={"ID":"2c282b97-703d-44ba-abda-5f000be24afc","Type":"ContainerDied","Data":"8125578267d2b87dbb1607bddca96ba526c08ff2f7ba11556e4705a611c476ce"} Jan 27 22:43:33 crc kubenswrapper[4793]: I0127 22:43:33.631728 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-s64mx/crc-debug-6bg2z" event={"ID":"2c282b97-703d-44ba-abda-5f000be24afc","Type":"ContainerStarted","Data":"b45b2669cc814cc7716e12f55359fec153b70fa3f735bf83e3789640617671fe"} Jan 27 22:43:33 crc kubenswrapper[4793]: I0127 22:43:33.694803 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-s64mx/crc-debug-6bg2z"] Jan 27 22:43:33 crc kubenswrapper[4793]: I0127 22:43:33.707559 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-s64mx/crc-debug-6bg2z"] Jan 27 22:43:33 crc kubenswrapper[4793]: I0127 22:43:33.807160 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027" Jan 27 22:43:33 crc kubenswrapper[4793]: E0127 22:43:33.808296 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:43:34 crc kubenswrapper[4793]: I0127 22:43:34.761865 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-s64mx/crc-debug-6bg2z" Jan 27 22:43:34 crc kubenswrapper[4793]: I0127 22:43:34.871686 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pqqfj\" (UniqueName: \"kubernetes.io/projected/2c282b97-703d-44ba-abda-5f000be24afc-kube-api-access-pqqfj\") pod \"2c282b97-703d-44ba-abda-5f000be24afc\" (UID: \"2c282b97-703d-44ba-abda-5f000be24afc\") " Jan 27 22:43:34 crc kubenswrapper[4793]: I0127 22:43:34.871875 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2c282b97-703d-44ba-abda-5f000be24afc-host\") pod \"2c282b97-703d-44ba-abda-5f000be24afc\" (UID: \"2c282b97-703d-44ba-abda-5f000be24afc\") " Jan 27 22:43:34 crc kubenswrapper[4793]: I0127 22:43:34.872022 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2c282b97-703d-44ba-abda-5f000be24afc-host" (OuterVolumeSpecName: "host") pod "2c282b97-703d-44ba-abda-5f000be24afc" (UID: "2c282b97-703d-44ba-abda-5f000be24afc"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 22:43:34 crc kubenswrapper[4793]: I0127 22:43:34.872913 4793 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2c282b97-703d-44ba-abda-5f000be24afc-host\") on node \"crc\" DevicePath \"\"" Jan 27 22:43:34 crc kubenswrapper[4793]: I0127 22:43:34.885704 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c282b97-703d-44ba-abda-5f000be24afc-kube-api-access-pqqfj" (OuterVolumeSpecName: "kube-api-access-pqqfj") pod "2c282b97-703d-44ba-abda-5f000be24afc" (UID: "2c282b97-703d-44ba-abda-5f000be24afc"). InnerVolumeSpecName "kube-api-access-pqqfj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:43:34 crc kubenswrapper[4793]: I0127 22:43:34.975454 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pqqfj\" (UniqueName: \"kubernetes.io/projected/2c282b97-703d-44ba-abda-5f000be24afc-kube-api-access-pqqfj\") on node \"crc\" DevicePath \"\"" Jan 27 22:43:35 crc kubenswrapper[4793]: I0127 22:43:35.667690 4793 scope.go:117] "RemoveContainer" containerID="8125578267d2b87dbb1607bddca96ba526c08ff2f7ba11556e4705a611c476ce" Jan 27 22:43:35 crc kubenswrapper[4793]: I0127 22:43:35.667760 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-s64mx/crc-debug-6bg2z" Jan 27 22:43:35 crc kubenswrapper[4793]: I0127 22:43:35.823396 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c282b97-703d-44ba-abda-5f000be24afc" path="/var/lib/kubelet/pods/2c282b97-703d-44ba-abda-5f000be24afc/volumes" Jan 27 22:43:36 crc kubenswrapper[4793]: I0127 22:43:36.803622 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f" Jan 27 22:43:36 crc kubenswrapper[4793]: E0127 22:43:36.804196 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:43:45 crc kubenswrapper[4793]: I0127 22:43:45.811941 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027" Jan 27 22:43:45 crc kubenswrapper[4793]: E0127 22:43:45.812673 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:43:48 crc kubenswrapper[4793]: I0127 22:43:48.803869 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f" Jan 27 22:43:48 crc kubenswrapper[4793]: E0127 22:43:48.804494 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:43:58 crc kubenswrapper[4793]: I0127 22:43:58.804350 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027" Jan 27 22:43:58 crc kubenswrapper[4793]: E0127 22:43:58.805424 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:44:03 crc kubenswrapper[4793]: I0127 22:44:03.805919 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f" Jan 27 22:44:03 crc kubenswrapper[4793]: E0127 22:44:03.806853 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:44:10 crc kubenswrapper[4793]: I0127 22:44:10.905117 4793 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_barbican-api-7b5cf4cb74-b6v79_15d23321-0811-4752-b014-9a4f08ceac3f/barbican-api/0.log" Jan 27 22:44:11 crc kubenswrapper[4793]: I0127 22:44:11.191189 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-547c9dc95d-r6k22_3d9caabf-4fb7-4374-966f-27ca72ed8ad3/barbican-keystone-listener/0.log" Jan 27 22:44:11 crc kubenswrapper[4793]: I0127 22:44:11.206333 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7b5cf4cb74-b6v79_15d23321-0811-4752-b014-9a4f08ceac3f/barbican-api-log/0.log" Jan 27 22:44:11 crc kubenswrapper[4793]: I0127 22:44:11.275789 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-547c9dc95d-r6k22_3d9caabf-4fb7-4374-966f-27ca72ed8ad3/barbican-keystone-listener-log/0.log" Jan 27 22:44:11 crc kubenswrapper[4793]: I0127 22:44:11.440018 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6f6f4d8b5c-97rd8_6139a6f4-f2b8-48f3-8997-e560f4deb75f/barbican-worker/0.log" Jan 27 22:44:11 crc kubenswrapper[4793]: I0127 22:44:11.472610 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6f6f4d8b5c-97rd8_6139a6f4-f2b8-48f3-8997-e560f4deb75f/barbican-worker-log/0.log" Jan 27 22:44:11 crc kubenswrapper[4793]: I0127 22:44:11.709603 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-rhcqm_b9480a2b-9979-4554-a98e-143e758ba256/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 22:44:11 crc kubenswrapper[4793]: I0127 22:44:11.803174 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027" Jan 27 22:44:11 crc kubenswrapper[4793]: E0127 22:44:11.803517 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:44:11 crc kubenswrapper[4793]: I0127 22:44:11.867998 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_904d3d32-2c98-4e0d-b8e7-6554e661d780/ceilometer-central-agent/0.log" Jan 27 22:44:11 crc kubenswrapper[4793]: I0127 22:44:11.886132 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_904d3d32-2c98-4e0d-b8e7-6554e661d780/ceilometer-notification-agent/0.log" Jan 27 22:44:11 crc kubenswrapper[4793]: I0127 22:44:11.939635 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_904d3d32-2c98-4e0d-b8e7-6554e661d780/sg-core/0.log" Jan 27 22:44:11 crc kubenswrapper[4793]: I0127 22:44:11.967840 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_904d3d32-2c98-4e0d-b8e7-6554e661d780/proxy-httpd/0.log" Jan 27 22:44:12 crc kubenswrapper[4793]: I0127 22:44:12.183372 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_006b6058-ee89-4438-92d7-3c02b8136803/cinder-api-log/0.log" Jan 27 22:44:12 crc kubenswrapper[4793]: I0127 22:44:12.516177 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_c765d809-ba2f-47e7-a54c-e6140a738c8d/probe/0.log" Jan 27 22:44:12 crc 
kubenswrapper[4793]: I0127 22:44:12.684368 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_006b6058-ee89-4438-92d7-3c02b8136803/cinder-api/0.log" Jan 27 22:44:12 crc kubenswrapper[4793]: I0127 22:44:12.713571 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_c765d809-ba2f-47e7-a54c-e6140a738c8d/cinder-backup/0.log" Jan 27 22:44:12 crc kubenswrapper[4793]: I0127 22:44:12.736374 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_8a6fa3f9-7dd5-47d9-8650-eb700dc18497/cinder-scheduler/0.log" Jan 27 22:44:12 crc kubenswrapper[4793]: I0127 22:44:12.848172 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_8a6fa3f9-7dd5-47d9-8650-eb700dc18497/probe/0.log" Jan 27 22:44:13 crc kubenswrapper[4793]: I0127 22:44:13.028285 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-0_37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0/probe/0.log" Jan 27 22:44:13 crc kubenswrapper[4793]: I0127 22:44:13.089509 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-0_37cd0dd8-9fd1-41a0-bb1d-9894f3a627f0/cinder-volume/0.log" Jan 27 22:44:13 crc kubenswrapper[4793]: I0127 22:44:13.363728 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_768192cb-07c6-4ce7-b090-7d277a7c4d58/probe/0.log" Jan 27 22:44:13 crc kubenswrapper[4793]: I0127 22:44:13.391572 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-2pcs5_411ebcf6-5cec-4604-9a7c-2f3c720296d6/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 22:44:13 crc kubenswrapper[4793]: I0127 22:44:13.414795 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_768192cb-07c6-4ce7-b090-7d277a7c4d58/cinder-volume/0.log" Jan 27 22:44:13 crc kubenswrapper[4793]: I0127 22:44:13.892716 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-l2g64_93d0a92d-00df-4317-a361-d4d1858b0602/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 22:44:13 crc kubenswrapper[4793]: I0127 22:44:13.928691 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-667cf85d85-npcsc_822c33e2-e40a-4194-8ec0-f413e4915457/init/0.log" Jan 27 22:44:14 crc kubenswrapper[4793]: I0127 22:44:14.075507 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-667cf85d85-npcsc_822c33e2-e40a-4194-8ec0-f413e4915457/init/0.log" Jan 27 22:44:14 crc kubenswrapper[4793]: I0127 22:44:14.168824 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-mdmsr_2ac35f24-57b8-4521-8509-5adc5ae84b60/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 22:44:14 crc kubenswrapper[4793]: I0127 22:44:14.284830 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-667cf85d85-npcsc_822c33e2-e40a-4194-8ec0-f413e4915457/dnsmasq-dns/0.log" Jan 27 22:44:14 crc kubenswrapper[4793]: I0127 22:44:14.441277 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_51d1e06b-2f11-4cf6-ba2c-ebdb0d2e1c0c/glance-log/0.log" Jan 27 22:44:14 crc kubenswrapper[4793]: I0127 22:44:14.464912 4793 log.go:25] "Finished parsing log file" 
Jan 27 22:44:14 crc kubenswrapper[4793]: I0127 22:44:14.644422 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_bf13be98-0b5a-4b5b-8b85-696c9c35101d/glance-log/0.log"
Jan 27 22:44:14 crc kubenswrapper[4793]: I0127 22:44:14.690720 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_bf13be98-0b5a-4b5b-8b85-696c9c35101d/glance-httpd/0.log"
Jan 27 22:44:14 crc kubenswrapper[4793]: I0127 22:44:14.848137 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-57fc549f96-h7nth_598878f3-c1fc-481f-ad69-dacba44a1ccc/horizon/0.log"
Jan 27 22:44:14 crc kubenswrapper[4793]: I0127 22:44:14.933089 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-h785f_03785202-a2ac-4a5d-a761-40636c332578/install-certs-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 27 22:44:15 crc kubenswrapper[4793]: I0127 22:44:15.249194 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-k4h65_ab303649-e7fe-4056-8414-9afda486e099/install-os-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 27 22:44:15 crc kubenswrapper[4793]: I0127 22:44:15.562755 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29492461-zkrn5_42f58700-9ffd-4ba4-806d-13d345c8923c/keystone-cron/0.log"
Jan 27 22:44:15 crc kubenswrapper[4793]: I0127 22:44:15.855219 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-74bfddb9f7-8qtb8_6ed6109b-d066-45e9-81e5-7d7a42c55b77/keystone-api/0.log"
Jan 27 22:44:15 crc kubenswrapper[4793]: I0127 22:44:15.872663 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29492521-tfmps_53e5f200-1d60-40e6-94e1-dc9d928c2785/keystone-cron/0.log"
Jan 27 22:44:15 crc kubenswrapper[4793]: I0127 22:44:15.912759 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_c524394c-23d4-4fb4-b41f-0b3151bae4d1/kube-state-metrics/0.log"
Jan 27 22:44:15 crc kubenswrapper[4793]: I0127 22:44:15.980308 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-57fc549f96-h7nth_598878f3-c1fc-481f-ad69-dacba44a1ccc/horizon-log/0.log"
Jan 27 22:44:16 crc kubenswrapper[4793]: I0127 22:44:16.171313 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-tpf4h_b3a616d9-776d-49a1-88d7-3292fdbdb7b6/libvirt-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 27 22:44:16 crc kubenswrapper[4793]: I0127 22:44:16.512296 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5875f8f849-hlxzn_0510c44d-95e6-4986-a108-87c160fac699/neutron-httpd/0.log"
Jan 27 22:44:16 crc kubenswrapper[4793]: I0127 22:44:16.591373 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5875f8f849-hlxzn_0510c44d-95e6-4986-a108-87c160fac699/neutron-api/0.log"
Jan 27 22:44:16 crc kubenswrapper[4793]: I0127 22:44:16.631324 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-qszpc_124c5ea7-93cd-46ae-be46-fb00f74edaa4/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 27 22:44:17 crc kubenswrapper[4793]: I0127 22:44:17.340075 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_d442904a-5793-4375-a27a-3d80e7214ac4/nova-cell0-conductor-conductor/0.log"
Jan 27 22:44:17 crc kubenswrapper[4793]: I0127 22:44:17.643057 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_226a19ba-8411-43a8-966c-f1ea2d67a5bd/nova-cell1-conductor-conductor/0.log"
Jan 27 22:44:17 crc kubenswrapper[4793]: I0127 22:44:17.804159 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f"
Jan 27 22:44:17 crc kubenswrapper[4793]: E0127 22:44:17.804484 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:44:17 crc kubenswrapper[4793]: I0127 22:44:17.887176 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_2d5a3a2a-60fe-4c25-9b34-39831787c64d/nova-api-log/0.log"
Jan 27 22:44:18 crc kubenswrapper[4793]: I0127 22:44:18.215472 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-pb8b2_187681ff-22a7-4ec2-97f0-94d51c9dc1ca/nova-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 27 22:44:18 crc kubenswrapper[4793]: I0127 22:44:18.326279 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_54fdd2d8-8d6e-41f5-9a60-e7367b399aa8/nova-cell1-novncproxy-novncproxy/0.log"
Jan 27 22:44:18 crc kubenswrapper[4793]: I0127 22:44:18.524146 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_25954f87-0c42-46c3-abb5-5ec9932665a9/nova-metadata-log/0.log"
Jan 27 22:44:18 crc kubenswrapper[4793]: I0127 22:44:18.753676 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_2d5a3a2a-60fe-4c25-9b34-39831787c64d/nova-api-api/0.log"
Jan 27 22:44:18 crc kubenswrapper[4793]: I0127 22:44:18.929399 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_45849ced-f655-4b51-a545-b8fd1c0e3d09/nova-scheduler-scheduler/0.log"
Jan 27 22:44:18 crc kubenswrapper[4793]: I0127 22:44:18.980168 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_cd405c61-4515-4524-8b4c-c30fcc225b3b/mysql-bootstrap/0.log"
Jan 27 22:44:19 crc kubenswrapper[4793]: I0127 22:44:19.207083 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_cd405c61-4515-4524-8b4c-c30fcc225b3b/mysql-bootstrap/0.log"
Jan 27 22:44:19 crc kubenswrapper[4793]: I0127 22:44:19.225511 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_cd405c61-4515-4524-8b4c-c30fcc225b3b/galera/0.log"
Jan 27 22:44:19 crc kubenswrapper[4793]: I0127 22:44:19.420257 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974/mysql-bootstrap/0.log"
Jan 27 22:44:19 crc kubenswrapper[4793]: I0127 22:44:19.653071 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974/galera/0.log"
Jan 27 22:44:19 crc kubenswrapper[4793]: I0127 22:44:19.715005 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_64383dfb-bc2d-4ed6-b2d9-0a9fd4f2d974/mysql-bootstrap/0.log"
Jan 27 22:44:19 crc kubenswrapper[4793]: I0127 22:44:19.883146 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_e65fb7be-afef-4a68-b5a9-e772125ee668/openstackclient/0.log"
Jan 27 22:44:20 crc kubenswrapper[4793]: I0127 22:44:20.013834 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-fxmkt_590c61a6-8355-4f4f-be2b-4680745b4732/ovn-controller/0.log"
Jan 27 22:44:20 crc kubenswrapper[4793]: I0127 22:44:20.345582 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-v5zns_dd80fd2c-f25f-4077-8860-e7296040a46f/openstack-network-exporter/0.log"
Jan 27 22:44:20 crc kubenswrapper[4793]: I0127 22:44:20.409616 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-rrm85_e8926f6a-2dd1-4b4d-912b-8f11e9c51832/ovsdb-server-init/0.log"
Jan 27 22:44:20 crc kubenswrapper[4793]: I0127 22:44:20.641579 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-rrm85_e8926f6a-2dd1-4b4d-912b-8f11e9c51832/ovsdb-server/0.log"
Jan 27 22:44:20 crc kubenswrapper[4793]: I0127 22:44:20.654584 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-rrm85_e8926f6a-2dd1-4b4d-912b-8f11e9c51832/ovsdb-server-init/0.log"
Jan 27 22:44:21 crc kubenswrapper[4793]: I0127 22:44:21.000067 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-vbrjv_f120b137-90a3-45e8-946f-5d32e682696a/ovn-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 27 22:44:21 crc kubenswrapper[4793]: I0127 22:44:21.105705 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-rrm85_e8926f6a-2dd1-4b4d-912b-8f11e9c51832/ovs-vswitchd/0.log"
Jan 27 22:44:21 crc kubenswrapper[4793]: I0127 22:44:21.219465 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda/openstack-network-exporter/0.log"
Jan 27 22:44:21 crc kubenswrapper[4793]: I0127 22:44:21.347489 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_08f0f5f2-6088-4d9e-a8fb-a5b87c64ceda/ovn-northd/0.log"
Jan 27 22:44:21 crc kubenswrapper[4793]: I0127 22:44:21.651953 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_58054670-14c6-4c95-8791-edb32ef325da/openstack-network-exporter/0.log"
Jan 27 22:44:21 crc kubenswrapper[4793]: I0127 22:44:21.806885 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_58054670-14c6-4c95-8791-edb32ef325da/ovsdbserver-nb/0.log"
Jan 27 22:44:21 crc kubenswrapper[4793]: I0127 22:44:21.829746 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_6e42416e-bfb3-4e3e-a640-fe1bcbb54928/openstack-network-exporter/0.log"
Jan 27 22:44:21 crc kubenswrapper[4793]: I0127 22:44:21.997393 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_6e42416e-bfb3-4e3e-a640-fe1bcbb54928/ovsdbserver-sb/0.log"
Jan 27 22:44:22 crc kubenswrapper[4793]: I0127 22:44:22.302941 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-988699dd4-wjjzw_98e27e40-e02e-41a8-8935-f29b264435a7/placement-api/0.log"
Jan 27 22:44:22 crc kubenswrapper[4793]: I0127 22:44:22.473083 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8230a67d-9b25-4098-8bc7-a934934d4084/init-config-reloader/0.log"
Jan 27 22:44:22 crc kubenswrapper[4793]: I0127 22:44:22.503093 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-988699dd4-wjjzw_98e27e40-e02e-41a8-8935-f29b264435a7/placement-log/0.log"
Jan 27 22:44:22 crc kubenswrapper[4793]: I0127 22:44:22.602126 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_25954f87-0c42-46c3-abb5-5ec9932665a9/nova-metadata-metadata/0.log"
Jan 27 22:44:22 crc kubenswrapper[4793]: I0127 22:44:22.718989 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8230a67d-9b25-4098-8bc7-a934934d4084/config-reloader/0.log"
Jan 27 22:44:22 crc kubenswrapper[4793]: I0127 22:44:22.737048 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8230a67d-9b25-4098-8bc7-a934934d4084/init-config-reloader/0.log"
Jan 27 22:44:22 crc kubenswrapper[4793]: I0127 22:44:22.796408 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8230a67d-9b25-4098-8bc7-a934934d4084/prometheus/0.log"
Jan 27 22:44:22 crc kubenswrapper[4793]: I0127 22:44:22.804609 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027"
Jan 27 22:44:22 crc kubenswrapper[4793]: E0127 22:44:22.804961 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:44:22 crc kubenswrapper[4793]: I0127 22:44:22.890125 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8230a67d-9b25-4098-8bc7-a934934d4084/thanos-sidecar/0.log"
Jan 27 22:44:23 crc kubenswrapper[4793]: I0127 22:44:23.058293 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_6b5f0924-d10f-4e93-963c-de03d16f48c1/setup-container/0.log"
Jan 27 22:44:23 crc kubenswrapper[4793]: I0127 22:44:23.223423 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_6b5f0924-d10f-4e93-963c-de03d16f48c1/rabbitmq/0.log"
Jan 27 22:44:23 crc kubenswrapper[4793]: I0127 22:44:23.241345 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_6b5f0924-d10f-4e93-963c-de03d16f48c1/setup-container/0.log"
Jan 27 22:44:23 crc kubenswrapper[4793]: I0127 22:44:23.303175 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_d13b401d-8f36-4677-b782-ebf9a3d5daab/setup-container/0.log"
Jan 27 22:44:23 crc kubenswrapper[4793]: I0127 22:44:23.591372 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_c4797dd0-4754-4037-983f-64d2aa1fa902/setup-container/0.log"
Jan 27 22:44:23 crc kubenswrapper[4793]: I0127 22:44:23.609372 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_d13b401d-8f36-4677-b782-ebf9a3d5daab/setup-container/0.log"
Jan 27 22:44:23 crc kubenswrapper[4793]: I0127 22:44:23.664702 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_d13b401d-8f36-4677-b782-ebf9a3d5daab/rabbitmq/0.log"
Jan 27 22:44:23 crc kubenswrapper[4793]: I0127 22:44:23.938881 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_c4797dd0-4754-4037-983f-64d2aa1fa902/setup-container/0.log"
Jan 27 22:44:23 crc kubenswrapper[4793]: I0127 22:44:23.952359 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-fvs7m_3c67116a-7a1e-4e35-8652-4a453d81e4de/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 27 22:44:23 crc kubenswrapper[4793]: I0127 22:44:23.965086 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_c4797dd0-4754-4037-983f-64d2aa1fa902/rabbitmq/0.log"
Jan 27 22:44:24 crc kubenswrapper[4793]: I0127 22:44:24.156006 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-cfnm5_41c7f13a-589b-496e-9709-e5270fb0e6aa/redhat-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 27 22:44:24 crc kubenswrapper[4793]: I0127 22:44:24.197134 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-6ggqb_597edc01-51d3-4199-ae65-a0439d6bbf66/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 27 22:44:24 crc kubenswrapper[4793]: I0127 22:44:24.724975 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-dgldl_86d391ca-f72f-4332-9d2b-568200608a9e/run-os-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 27 22:44:24 crc kubenswrapper[4793]: I0127 22:44:24.890911 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-tlxlc_018aa89c-0173-426e-b107-81c9b171c475/ssh-known-hosts-edpm-deployment/0.log"
Jan 27 22:44:24 crc kubenswrapper[4793]: I0127 22:44:24.958461 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5564bdd769-xf2h4_1aeeeb43-14d9-471a-8433-825ee93be32b/proxy-server/0.log"
Jan 27 22:44:25 crc kubenswrapper[4793]: I0127 22:44:25.078070 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-2m2n7_f5ddc141-eae8-4a4c-b118-a79a9276cf33/swift-ring-rebalance/0.log"
Jan 27 22:44:25 crc kubenswrapper[4793]: I0127 22:44:25.218421 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5564bdd769-xf2h4_1aeeeb43-14d9-471a-8433-825ee93be32b/proxy-httpd/0.log"
Jan 27 22:44:25 crc kubenswrapper[4793]: I0127 22:44:25.271363 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6a954bdd-89aa-4d5c-8034-5c8ed27e8652/account-auditor/0.log"
Jan 27 22:44:25 crc kubenswrapper[4793]: I0127 22:44:25.320335 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6a954bdd-89aa-4d5c-8034-5c8ed27e8652/account-reaper/0.log"
Jan 27 22:44:25 crc kubenswrapper[4793]: I0127 22:44:25.472020 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6a954bdd-89aa-4d5c-8034-5c8ed27e8652/account-server/0.log"
Jan 27 22:44:25 crc kubenswrapper[4793]: I0127 22:44:25.531315 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6a954bdd-89aa-4d5c-8034-5c8ed27e8652/account-replicator/0.log"
Jan 27 22:44:25 crc kubenswrapper[4793]: I0127 22:44:25.544472 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6a954bdd-89aa-4d5c-8034-5c8ed27e8652/container-auditor/0.log"
Jan 27 22:44:25 crc kubenswrapper[4793]: I0127 22:44:25.621667 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6a954bdd-89aa-4d5c-8034-5c8ed27e8652/container-replicator/0.log"
Jan 27 22:44:25 crc kubenswrapper[4793]: I0127 22:44:25.638756 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6a954bdd-89aa-4d5c-8034-5c8ed27e8652/container-server/0.log"
Jan 27 22:44:25 crc kubenswrapper[4793]: I0127 22:44:25.746354 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6a954bdd-89aa-4d5c-8034-5c8ed27e8652/container-updater/0.log"
Jan 27 22:44:25 crc kubenswrapper[4793]: I0127 22:44:25.876613 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6a954bdd-89aa-4d5c-8034-5c8ed27e8652/object-expirer/0.log"
Jan 27 22:44:25 crc kubenswrapper[4793]: I0127 22:44:25.881387 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6a954bdd-89aa-4d5c-8034-5c8ed27e8652/object-auditor/0.log"
Jan 27 22:44:25 crc kubenswrapper[4793]: I0127 22:44:25.884496 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6a954bdd-89aa-4d5c-8034-5c8ed27e8652/object-replicator/0.log"
Jan 27 22:44:25 crc kubenswrapper[4793]: I0127 22:44:25.964529 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6a954bdd-89aa-4d5c-8034-5c8ed27e8652/object-server/0.log"
Jan 27 22:44:26 crc kubenswrapper[4793]: I0127 22:44:26.112037 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6a954bdd-89aa-4d5c-8034-5c8ed27e8652/rsync/0.log"
Jan 27 22:44:26 crc kubenswrapper[4793]: I0127 22:44:26.118916 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6a954bdd-89aa-4d5c-8034-5c8ed27e8652/swift-recon-cron/0.log"
Jan 27 22:44:26 crc kubenswrapper[4793]: I0127 22:44:26.148968 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6a954bdd-89aa-4d5c-8034-5c8ed27e8652/object-updater/0.log"
Jan 27 22:44:26 crc kubenswrapper[4793]: I0127 22:44:26.365600 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-ckfqs_9d45706e-d075-45fc-9d80-b21908572463/telemetry-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 27 22:44:26 crc kubenswrapper[4793]: I0127 22:44:26.593928 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_8b5d6801-ee34-4795-926a-d9ec345d7eae/test-operator-logs-container/0.log"
Jan 27 22:44:26 crc kubenswrapper[4793]: I0127 22:44:26.762626 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-682ms_68e3cfe1-7fc5-4cf0-89e9-fcd526e8fa6f/validate-network-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 27 22:44:27 crc kubenswrapper[4793]: I0127 22:44:27.364780 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_045591bb-dd8c-437e-9cf8-0e0b520fc49d/watcher-applier/31.log"
Jan 27 22:44:27 crc kubenswrapper[4793]: I0127 22:44:27.451519 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_7c64ec91-a07a-470e-a490-2ad9c6a06248/tempest-tests-tempest-tests-runner/0.log"
Jan 27 22:44:27 crc kubenswrapper[4793]: I0127 22:44:27.594003 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_045591bb-dd8c-437e-9cf8-0e0b520fc49d/watcher-applier/31.log"
Jan 27 22:44:27 crc kubenswrapper[4793]: I0127 22:44:27.743743 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_6609a1f3-1bd1-4179-99fb-2dc0f32df09d/watcher-api-log/0.log"
Jan 27 22:44:29 crc kubenswrapper[4793]: I0127 22:44:29.982020 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_0bd671fd-f02a-4b8d-ad50-60ee8e6c5cc0/watcher-decision-engine/0.log"
Jan 27 22:44:30 crc kubenswrapper[4793]: I0127 22:44:30.803136 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f"
Jan 27 22:44:30 crc kubenswrapper[4793]: E0127 22:44:30.803625 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:44:34 crc kubenswrapper[4793]: I0127 22:44:34.067140 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_6609a1f3-1bd1-4179-99fb-2dc0f32df09d/watcher-api/0.log"
Jan 27 22:44:37 crc kubenswrapper[4793]: I0127 22:44:37.803746 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027"
Jan 27 22:44:37 crc kubenswrapper[4793]: E0127 22:44:37.804455 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:44:45 crc kubenswrapper[4793]: I0127 22:44:45.811984 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f"
Jan 27 22:44:45 crc kubenswrapper[4793]: E0127 22:44:45.814343 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:44:46 crc kubenswrapper[4793]: I0127 22:44:46.440177 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_da5aef81-265e-457b-bd86-b770db112298/memcached/0.log"
Jan 27 22:44:49 crc kubenswrapper[4793]: I0127 22:44:49.803807 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027"
Jan 27 22:44:49 crc kubenswrapper[4793]: E0127 22:44:49.804708 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:44:56 crc kubenswrapper[4793]: I0127 22:44:56.804383 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f"
Jan 27 22:44:56 crc kubenswrapper[4793]: E0127 22:44:56.805577 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.168784 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj"]
Jan 27 22:45:00 crc kubenswrapper[4793]: E0127 22:45:00.170295 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c282b97-703d-44ba-abda-5f000be24afc" containerName="container-00"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.170322 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c282b97-703d-44ba-abda-5f000be24afc" containerName="container-00"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.170777 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c282b97-703d-44ba-abda-5f000be24afc" containerName="container-00"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.172000 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.178137 4793 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.181200 4793 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.186064 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj"]
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.235027 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrd5h\" (UniqueName: \"kubernetes.io/projected/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-kube-api-access-qrd5h\") pod \"collect-profiles-29492565-v7lcj\" (UID: \"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.235370 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-secret-volume\") pod \"collect-profiles-29492565-v7lcj\" (UID: \"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.235690 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-config-volume\") pod \"collect-profiles-29492565-v7lcj\" (UID: \"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.343821 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrd5h\" (UniqueName: \"kubernetes.io/projected/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-kube-api-access-qrd5h\") pod \"collect-profiles-29492565-v7lcj\" (UID: \"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.344095 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-secret-volume\") pod \"collect-profiles-29492565-v7lcj\" (UID: \"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.344191 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-config-volume\") pod \"collect-profiles-29492565-v7lcj\" (UID: \"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.345833 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-config-volume\") pod \"collect-profiles-29492565-v7lcj\" (UID: \"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.356757 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-secret-volume\") pod \"collect-profiles-29492565-v7lcj\" (UID: \"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.371885 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrd5h\" (UniqueName: \"kubernetes.io/projected/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-kube-api-access-qrd5h\") pod \"collect-profiles-29492565-v7lcj\" (UID: \"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.495461 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj"
Jan 27 22:45:00 crc kubenswrapper[4793]: I0127 22:45:00.956519 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj"]
Jan 27 22:45:01 crc kubenswrapper[4793]: I0127 22:45:01.733910 4793 generic.go:334] "Generic (PLEG): container finished" podID="23289cee-e86d-49fe-9bd9-27a6f3b6ccaf" containerID="8e71917117c16cb8e1849d04967eb4648d2c5171f08a2ebbd58ca80c32b827b4" exitCode=0
Jan 27 22:45:01 crc kubenswrapper[4793]: I0127 22:45:01.734117 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj" event={"ID":"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf","Type":"ContainerDied","Data":"8e71917117c16cb8e1849d04967eb4648d2c5171f08a2ebbd58ca80c32b827b4"}
Jan 27 22:45:01 crc kubenswrapper[4793]: I0127 22:45:01.735440 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj" event={"ID":"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf","Type":"ContainerStarted","Data":"7166d5231ed1600fe056edb3bc3fed20b24f40e320c64661c09ed11bc24483cf"}
Jan 27 22:45:02 crc kubenswrapper[4793]: I0127 22:45:02.376695 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c_fab801dc-51a6-4937-9a1b-67eb5db6c0a9/util/0.log"
Jan 27 22:45:02 crc kubenswrapper[4793]: I0127 22:45:02.665466 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c_fab801dc-51a6-4937-9a1b-67eb5db6c0a9/util/0.log"
Jan 27 22:45:02 crc kubenswrapper[4793]: I0127 22:45:02.936517 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c_fab801dc-51a6-4937-9a1b-67eb5db6c0a9/pull/0.log"
Jan 27 22:45:02 crc kubenswrapper[4793]: I0127 22:45:02.965733 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c_fab801dc-51a6-4937-9a1b-67eb5db6c0a9/pull/0.log"
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.201880 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c_fab801dc-51a6-4937-9a1b-67eb5db6c0a9/extract/0.log"
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.207490 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c_fab801dc-51a6-4937-9a1b-67eb5db6c0a9/pull/0.log"
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.251413 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj"
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.333094 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-config-volume\") pod \"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf\" (UID: \"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf\") "
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.333355 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qrd5h\" (UniqueName: \"kubernetes.io/projected/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-kube-api-access-qrd5h\") pod \"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf\" (UID: \"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf\") "
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.333512 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-secret-volume\") pod \"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf\" (UID: \"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf\") "
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.333918 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-config-volume" (OuterVolumeSpecName: "config-volume") pod "23289cee-e86d-49fe-9bd9-27a6f3b6ccaf" (UID: "23289cee-e86d-49fe-9bd9-27a6f3b6ccaf"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.334782 4793 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-config-volume\") on node \"crc\" DevicePath \"\""
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.352778 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "23289cee-e86d-49fe-9bd9-27a6f3b6ccaf" (UID: "23289cee-e86d-49fe-9bd9-27a6f3b6ccaf"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.377797 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-kube-api-access-qrd5h" (OuterVolumeSpecName: "kube-api-access-qrd5h") pod "23289cee-e86d-49fe-9bd9-27a6f3b6ccaf" (UID: "23289cee-e86d-49fe-9bd9-27a6f3b6ccaf"). InnerVolumeSpecName "kube-api-access-qrd5h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.384558 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_6a404b0edbde7b063a3b8feb27ed03380095c90d044c1548d49fd3517a4c99c_fab801dc-51a6-4937-9a1b-67eb5db6c0a9/util/0.log"
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.436504 4793 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.436537 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qrd5h\" (UniqueName: \"kubernetes.io/projected/23289cee-e86d-49fe-9bd9-27a6f3b6ccaf-kube-api-access-qrd5h\") on node \"crc\" DevicePath \"\""
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.511427 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7f86f8796f-jn9j9_42bc0dc3-e9b2-4edc-865a-4d301956ec59/manager/0.log"
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.645445 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-7478f7dbf9-qt685_01780455-12a9-41cc-80bb-71d643522796/manager/0.log"
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.754812 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj" event={"ID":"23289cee-e86d-49fe-9bd9-27a6f3b6ccaf","Type":"ContainerDied","Data":"7166d5231ed1600fe056edb3bc3fed20b24f40e320c64661c09ed11bc24483cf"}
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.754925 4793 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7166d5231ed1600fe056edb3bc3fed20b24f40e320c64661c09ed11bc24483cf"
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.754851 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492565-v7lcj"
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.760641 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-b45d7bf98-pddnh_4105e86a-92bd-45a9-869f-310d634a514c/manager/0.log"
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.910103 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-78fdd796fd-crv46_44104a6a-d41a-4b4c-b119-1886c9b48a8b/manager/0.log"
Jan 27 22:45:03 crc kubenswrapper[4793]: I0127 22:45:03.994829 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-k9gch_cb13d95b-3b92-49bc-9bcd-af7d24b09869/manager/0.log"
Jan 27 22:45:04 crc kubenswrapper[4793]: I0127 22:45:04.102241 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-g4rnc_4bd1bccc-c605-43f0-a9ed-1f82139a6f16/manager/0.log"
Jan 27 22:45:04 crc kubenswrapper[4793]: I0127 22:45:04.332883 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-598f7747c9-h2w8c_81ead9a2-beaa-4651-a9ee-aaeda0acd7c3/manager/0.log"
Jan 27 22:45:04 crc kubenswrapper[4793]: I0127 22:45:04.395592 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl"]
Jan 27 22:45:04 crc kubenswrapper[4793]: I0127 22:45:04.406898 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492520-8h5tl"]
Jan 27 22:45:04 crc kubenswrapper[4793]: I0127 22:45:04.449308 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-694cf4f878-752f2_742adff5-75e8-4941-9815-03bc77850cfa/manager/0.log"
Jan 27 22:45:04 crc kubenswrapper[4793]: I0127 22:45:04.564573 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-b8b6d4659-795xn_744f20ad-f891-4948-8a8e-e0757333b75b/manager/0.log"
Jan 27 22:45:04 crc kubenswrapper[4793]: I0127 22:45:04.628661 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-78c6999f6f-m5bwb_ddb0bf87-c13a-48d3-9fa7-95c891f6c057/manager/0.log"
Jan 27 22:45:04 crc kubenswrapper[4793]: I0127 22:45:04.783100 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-6b9fb5fdcb-824xz_a78a18e3-25af-470d-8d1a-3bbc5b936703/manager/0.log"
Jan 27 22:45:04 crc kubenswrapper[4793]: I0127 22:45:04.803583 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027"
Jan 27 22:45:04 crc kubenswrapper[4793]: E0127 22:45:04.803856 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:45:04 crc kubenswrapper[4793]: I0127 22:45:04.900064 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78d58447c5-mfx2z_a227b0fb-079f-4d81-9f3e-380202824892/manager/0.log"
Jan 27 22:45:05 crc kubenswrapper[4793]: I0127 22:45:05.057369 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-7bdb645866-clx4z_3d4f2efb-7e15-4d10-ad22-a22b7ef0eb67/manager/0.log"
Jan 27 22:45:05 crc kubenswrapper[4793]: I0127 22:45:05.078611 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-5f4cd88d46-t5lwz_ff2db85c-6bbf-45c8-b27b-93b2cff54130/manager/0.log"
Jan 27 22:45:05 crc kubenswrapper[4793]: I0127 22:45:05.312018 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b85428jbw_85aa101f-3371-46e6-84e5-83005bdb7799/manager/0.log"
Jan 27 22:45:05 crc kubenswrapper[4793]: I0127 22:45:05.355258 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-77fb6f4c55-gn2gz_6c36140e-9ce9-4400-9c4f-00041bb11a41/operator/0.log"
Jan 27 22:45:05 crc kubenswrapper[4793]: I0127 22:45:05.735703 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-7ll2b_cff9b66d-6eb7-4fd6-8f40-32928a43df4d/registry-server/0.log"
Jan 27 22:45:05 crc kubenswrapper[4793]: I0127 22:45:05.821449 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df8023ae-0b9e-4c7d-9c2f-4d1cfb697216" path="/var/lib/kubelet/pods/df8023ae-0b9e-4c7d-9c2f-4d1cfb697216/volumes"
Jan 27 22:45:06 crc kubenswrapper[4793]: I0127 22:45:06.000431 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-6f75f45d54-n85zq_1da2285e-00b1-4b86-993c-47be2c441dc0/manager/0.log"
Jan 27 22:45:06 crc kubenswrapper[4793]: I0127 22:45:06.148140 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-79d5ccc684-wqcbj_d0f1b177-1cdf-45ce-a4f3-5cb9a4a3610d/manager/0.log"
Jan 27 22:45:06 crc kubenswrapper[4793]: I0127 22:45:06.398886 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-khvmz_b0693604-a724-43ba-92b9-d4a52ca4cf85/operator/0.log"
Jan 27 22:45:06 crc kubenswrapper[4793]: I0127 22:45:06.669420 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-547cbdb99f-tqmbv_71401df0-2118-4561-9d06-e311458f5357/manager/0.log"
Jan 27 22:45:06 crc kubenswrapper[4793]: I0127 22:45:06.696193 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-5b4bbd6fc8-fhjlh_68f23063-63cd-4c28-ae7a-e4e2d1b8ca4b/manager/0.log"
Jan 27 22:45:06 crc kubenswrapper[4793]: I0127 22:45:06.862895 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-69797bbcbd-gh2bm_40d57f5e-8c71-4278-b169-c1c439c8fe4a/manager/0.log"
Jan 27 22:45:06 crc kubenswrapper[4793]: I0127 22:45:06.892311 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-85cd9769bb-q2ft5_f6cc0f9d-084e-46a3-a2b9-691474c16005/manager/0.log"
Jan 27 22:45:07 crc kubenswrapper[4793]: I0127 22:45:07.048931 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5895dd5db-pkckf_32f012e5-4f42-4aaf-bc4a-25ad68296efc/manager/0.log"
Jan 27 22:45:07 crc kubenswrapper[4793]: I0127 22:45:07.803192 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f"
Jan 27 22:45:07 crc kubenswrapper[4793]: E0127 22:45:07.803538 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:45:16 crc kubenswrapper[4793]: I0127 22:45:16.888196 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027"
Jan 27 22:45:16 crc kubenswrapper[4793]: E0127 22:45:16.888835 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:45:18 crc kubenswrapper[4793]: I0127 22:45:18.803394 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f"
Jan 27 22:45:18 crc kubenswrapper[4793]: E0127 22:45:18.803910 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:45:27 crc kubenswrapper[4793]: I0127 22:45:27.804689 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027"
Jan 27 22:45:27 crc kubenswrapper[4793]: E0127 22:45:27.805534 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:45:29 crc kubenswrapper[4793]: I0127 22:45:29.272603 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-4lgxd_ba1e1437-9755-498f-b07f-40997bdfa64c/control-plane-machine-set-operator/0.log"
Jan 27 22:45:29 crc kubenswrapper[4793]: I0127 22:45:29.457083 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-sghkk_7be50072-4d5b-4ef3-a534-bdce40d627cb/machine-api-operator/0.log"
Jan 27 22:45:29 crc kubenswrapper[4793]: I0127 22:45:29.476015 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-sghkk_7be50072-4d5b-4ef3-a534-bdce40d627cb/kube-rbac-proxy/0.log"
Jan 27 22:45:29 crc kubenswrapper[4793]: I0127 22:45:29.803981 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f"
Jan 27 22:45:29 crc kubenswrapper[4793]: E0127 22:45:29.804365 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:45:39 crc kubenswrapper[4793]: I0127 22:45:39.803141 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027"
Jan 27 22:45:39 crc kubenswrapper[4793]: E0127 22:45:39.803899 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:45:40 crc kubenswrapper[4793]: I0127 22:45:40.095770 4793 scope.go:117] "RemoveContainer" containerID="ec0aeae4e2cdb457d681a8e50b51d8817b148ab2c535b02ef1cf26db61baa880"
Jan 27 22:45:43 crc kubenswrapper[4793]: I0127 22:45:43.804134 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f"
Jan 27 22:45:43 crc kubenswrapper[4793]: E0127 22:45:43.804800 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:45:43 crc kubenswrapper[4793]: I0127 22:45:43.914761 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-c48q7_248ec9d6-6acc-4c1e-bd45-9c51293869d1/cert-manager-controller/0.log"
Jan 27 22:45:44 crc kubenswrapper[4793]: I0127 22:45:44.093101 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-q928d_2b42206d-0339-46d2-9d21-a1486a3b671e/cert-manager-cainjector/0.log"
Jan 27 22:45:44 crc kubenswrapper[4793]: I0127 22:45:44.123983 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-fhq5k_62328e2f-a41e-4337-993d-d99b5fc3cbc6/cert-manager-webhook/0.log"
Jan 27 22:45:52 crc kubenswrapper[4793]: I0127 22:45:52.803020 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027"
Jan 27 22:45:52 crc kubenswrapper[4793]: E0127 22:45:52.805319 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:45:58 crc kubenswrapper[4793]: I0127 22:45:58.803591 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f"
Jan 27 22:45:58 crc kubenswrapper[4793]: E0127 22:45:58.804441 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:46:01 crc kubenswrapper[4793]: I0127 22:46:01.124726 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-gkmrf_fb3ff166-af70-4d4a-b729-6a13686fa910/nmstate-handler/0.log"
Jan 27 22:46:01 crc kubenswrapper[4793]: I0127 22:46:01.152897 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-mrr9k_f65ddc46-2da9-403d-bdd9-f9c5825fca15/nmstate-console-plugin/0.log"
Jan 27 22:46:01 crc kubenswrapper[4793]: I0127 22:46:01.327399 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-jvz78_84de09db-daa3-41cb-b012-c231dff18838/nmstate-metrics/0.log"
Jan 27 22:46:01 crc kubenswrapper[4793]: I0127 22:46:01.347351 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-jvz78_84de09db-daa3-41cb-b012-c231dff18838/kube-rbac-proxy/0.log"
Jan 27 22:46:01 crc kubenswrapper[4793]: I0127 22:46:01.512875 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-qmrh5_042b00b6-81fb-45a3-92be-37d3666ade02/nmstate-operator/0.log"
Jan 27 22:46:01 crc kubenswrapper[4793]: I0127 22:46:01.604438 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-tqg4n_21d1a1e4-f1c8-41fd-9ac0-91ad4003215a/nmstate-webhook/0.log"
Jan 27 22:46:04 crc kubenswrapper[4793]: I0127 22:46:04.804240 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027"
Jan 27 22:46:04 crc kubenswrapper[4793]: E0127 22:46:04.804863 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:46:12 crc kubenswrapper[4793]: I0127 22:46:12.803951 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f"
Jan 27 22:46:12 crc kubenswrapper[4793]: E0127 22:46:12.804746 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:46:18 crc kubenswrapper[4793]: I0127 22:46:18.575226 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-557c7_d778ab64-052f-4ffb-a80d-52d1807a499c/prometheus-operator/0.log"
Jan 27 22:46:18 crc kubenswrapper[4793]: I0127 22:46:18.756830 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59_15c11c1c-e768-4e99-ac32-157dbd118043/prometheus-operator-admission-webhook/0.log"
Jan 27 22:46:18 crc kubenswrapper[4793]: I0127 22:46:18.803838 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027"
Jan 27 22:46:18 crc kubenswrapper[4793]: E0127 22:46:18.804130 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:46:18 crc kubenswrapper[4793]: I0127 22:46:18.810962 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh_cdf9943f-a502-4e73-bbb7-5b638de02443/prometheus-operator-admission-webhook/0.log"
Jan 27 22:46:18 crc kubenswrapper[4793]: I0127 22:46:18.964384 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-ml85d_b58cf118-0ae5-43f3-bf3c-f5f01eb636ba/operator/0.log"
Jan 27 22:46:19 crc kubenswrapper[4793]: I0127 22:46:19.035877 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-x6b5g_5f176291-9b04-49fc-ad48-fe1552b2bcaf/perses-operator/0.log"
Jan 27 22:46:23 crc kubenswrapper[4793]: I0127 22:46:23.803610 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f"
Jan 27 22:46:23 crc kubenswrapper[4793]: E0127 22:46:23.804993 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:46:33 crc kubenswrapper[4793]: I0127 22:46:33.804812 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027"
Jan 27 22:46:33 crc kubenswrapper[4793]: E0127 22:46:33.805812 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:46:35 crc kubenswrapper[4793]: I0127 22:46:35.297797 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-r9hdv_5977e87a-93dd-4494-a4f7-cdc151fae6f4/kube-rbac-proxy/0.log"
Jan 27 22:46:35 crc kubenswrapper[4793]: I0127 22:46:35.418728 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-r9hdv_5977e87a-93dd-4494-a4f7-cdc151fae6f4/controller/0.log"
Jan 27 22:46:35 crc kubenswrapper[4793]: I0127 22:46:35.506820 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/cp-frr-files/0.log"
Jan 27 22:46:35 crc kubenswrapper[4793]: I0127 22:46:35.617808 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/cp-reloader/0.log"
27 22:46:35 crc kubenswrapper[4793]: I0127 22:46:35.617808 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/cp-reloader/0.log" Jan 27 22:46:35 crc kubenswrapper[4793]: I0127 22:46:35.648949 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/cp-frr-files/0.log" Jan 27 22:46:35 crc kubenswrapper[4793]: I0127 22:46:35.656691 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/cp-metrics/0.log" Jan 27 22:46:35 crc kubenswrapper[4793]: I0127 22:46:35.715521 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/cp-reloader/0.log" Jan 27 22:46:35 crc kubenswrapper[4793]: I0127 22:46:35.809407 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f" Jan 27 22:46:36 crc kubenswrapper[4793]: I0127 22:46:36.055770 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/cp-frr-files/0.log" Jan 27 22:46:36 crc kubenswrapper[4793]: I0127 22:46:36.074038 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/cp-reloader/0.log" Jan 27 22:46:36 crc kubenswrapper[4793]: I0127 22:46:36.101791 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/cp-metrics/0.log" Jan 27 22:46:36 crc kubenswrapper[4793]: I0127 22:46:36.123676 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/cp-metrics/0.log" Jan 27 22:46:36 crc kubenswrapper[4793]: I0127 22:46:36.294901 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/cp-frr-files/0.log" Jan 27 22:46:36 crc kubenswrapper[4793]: I0127 22:46:36.314469 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/cp-reloader/0.log" Jan 27 22:46:36 crc kubenswrapper[4793]: I0127 22:46:36.319931 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/controller/0.log" Jan 27 22:46:36 crc kubenswrapper[4793]: I0127 22:46:36.326033 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/cp-metrics/0.log" Jan 27 22:46:36 crc kubenswrapper[4793]: I0127 22:46:36.534831 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/kube-rbac-proxy/0.log" Jan 27 22:46:36 crc kubenswrapper[4793]: I0127 22:46:36.548097 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/kube-rbac-proxy-frr/0.log" Jan 27 22:46:36 crc kubenswrapper[4793]: I0127 22:46:36.571574 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/frr-metrics/0.log" Jan 27 22:46:36 crc kubenswrapper[4793]: I0127 22:46:36.796279 4793 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-j6cjw_a694ab3e-9452-4d89-aad5-7dca775c9481/frr-k8s-webhook-server/0.log" Jan 27 22:46:36 crc kubenswrapper[4793]: I0127 22:46:36.817144 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/reloader/0.log" Jan 27 22:46:37 crc kubenswrapper[4793]: I0127 22:46:37.068295 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-77ddf8bbff-6skqs_3cb9730b-6fca-41ce-a6d8-9215c13b01e0/manager/0.log" Jan 27 22:46:37 crc kubenswrapper[4793]: I0127 22:46:37.071560 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1"} Jan 27 22:46:37 crc kubenswrapper[4793]: I0127 22:46:37.208057 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-8fb588c74-mjlw7_016b5dea-69f7-4abf-8e8a-72adc4922be9/webhook-server/0.log" Jan 27 22:46:37 crc kubenswrapper[4793]: I0127 22:46:37.340629 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-rzvg6_90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e/kube-rbac-proxy/0.log" Jan 27 22:46:38 crc kubenswrapper[4793]: I0127 22:46:38.051508 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-rzvg6_90e6b2bf-caa8-4ac8-aa2b-4c9389a3666e/speaker/0.log" Jan 27 22:46:38 crc kubenswrapper[4793]: I0127 22:46:38.242429 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:46:38 crc kubenswrapper[4793]: I0127 22:46:38.243309 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 22:46:38 crc kubenswrapper[4793]: I0127 22:46:38.282333 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0" Jan 27 22:46:38 crc kubenswrapper[4793]: I0127 22:46:38.778423 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9wlsd_6cdd56cd-76a3-41b7-8ed8-4446462605e3/frr/0.log" Jan 27 22:46:39 crc kubenswrapper[4793]: I0127 22:46:39.134256 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0" Jan 27 22:46:40 crc kubenswrapper[4793]: I0127 22:46:40.100845 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" exitCode=1 Jan 27 22:46:40 crc kubenswrapper[4793]: I0127 22:46:40.100958 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1"} Jan 27 22:46:40 crc kubenswrapper[4793]: I0127 22:46:40.101297 4793 scope.go:117] "RemoveContainer" containerID="5396c186af49ec50d6b69cc1b999659f3cb369d8eedc99226848dbe7dbd3c24f" Jan 27 22:46:40 crc kubenswrapper[4793]: I0127 22:46:40.101945 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:46:40 crc kubenswrapper[4793]: E0127 22:46:40.102532 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:46:41 crc kubenswrapper[4793]: I0127 22:46:41.112864 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:46:41 crc kubenswrapper[4793]: E0127 22:46:41.113453 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:46:43 crc kubenswrapper[4793]: I0127 22:46:43.244693 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 22:46:43 crc kubenswrapper[4793]: I0127 22:46:43.245819 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:46:43 crc kubenswrapper[4793]: E0127 22:46:43.246191 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:46:44 crc kubenswrapper[4793]: I0127 22:46:44.803640 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027" Jan 27 22:46:44 crc kubenswrapper[4793]: E0127 22:46:44.804341 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:46:48 crc kubenswrapper[4793]: I0127 22:46:48.243004 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:46:48 crc kubenswrapper[4793]: I0127 22:46:48.243307 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:46:48 crc kubenswrapper[4793]: I0127 22:46:48.244181 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:46:48 crc kubenswrapper[4793]: E0127 22:46:48.244481 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:46:56 crc kubenswrapper[4793]: I0127 22:46:56.785212 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft_2545c17b-cfc8-46bc-ad6c-9d7b2e74a216/util/0.log" Jan 27 22:46:56 crc kubenswrapper[4793]: I0127 22:46:56.971767 4793 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft_2545c17b-cfc8-46bc-ad6c-9d7b2e74a216/pull/0.log" Jan 27 22:46:56 crc kubenswrapper[4793]: I0127 22:46:56.983833 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft_2545c17b-cfc8-46bc-ad6c-9d7b2e74a216/util/0.log" Jan 27 22:46:57 crc kubenswrapper[4793]: I0127 22:46:57.026510 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft_2545c17b-cfc8-46bc-ad6c-9d7b2e74a216/pull/0.log" Jan 27 22:46:57 crc kubenswrapper[4793]: I0127 22:46:57.346118 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft_2545c17b-cfc8-46bc-ad6c-9d7b2e74a216/util/0.log" Jan 27 22:46:57 crc kubenswrapper[4793]: I0127 22:46:57.458825 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft_2545c17b-cfc8-46bc-ad6c-9d7b2e74a216/pull/0.log" Jan 27 22:46:57 crc kubenswrapper[4793]: I0127 22:46:57.471504 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcdf6ft_2545c17b-cfc8-46bc-ad6c-9d7b2e74a216/extract/0.log" Jan 27 22:46:57 crc kubenswrapper[4793]: I0127 22:46:57.617473 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg_1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b/util/0.log" Jan 27 22:46:58 crc kubenswrapper[4793]: I0127 22:46:58.062317 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027" Jan 27 22:46:58 crc kubenswrapper[4793]: E0127 22:46:58.076157 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:46:58 crc kubenswrapper[4793]: I0127 22:46:58.198269 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg_1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b/util/0.log" Jan 27 22:46:58 crc kubenswrapper[4793]: I0127 22:46:58.220984 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg_1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b/pull/0.log" Jan 27 22:46:58 crc kubenswrapper[4793]: I0127 22:46:58.221203 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg_1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b/pull/0.log" Jan 27 22:46:58 crc kubenswrapper[4793]: I0127 22:46:58.422624 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg_1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b/pull/0.log" Jan 27 22:46:58 crc kubenswrapper[4793]: I0127 22:46:58.444288 4793 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg_1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b/util/0.log" Jan 27 22:46:58 crc kubenswrapper[4793]: I0127 22:46:58.489828 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713j9bvg_1012c0ab-38db-4f3d-ad2a-ba9dd5a1814b/extract/0.log" Jan 27 22:46:58 crc kubenswrapper[4793]: I0127 22:46:58.621003 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx_b691d932-0b30-4838-9566-a378435e170d/util/0.log" Jan 27 22:46:58 crc kubenswrapper[4793]: I0127 22:46:58.791445 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx_b691d932-0b30-4838-9566-a378435e170d/pull/0.log" Jan 27 22:46:58 crc kubenswrapper[4793]: I0127 22:46:58.807713 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx_b691d932-0b30-4838-9566-a378435e170d/pull/0.log" Jan 27 22:46:58 crc kubenswrapper[4793]: I0127 22:46:58.819383 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx_b691d932-0b30-4838-9566-a378435e170d/util/0.log" Jan 27 22:46:58 crc kubenswrapper[4793]: I0127 22:46:58.954034 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx_b691d932-0b30-4838-9566-a378435e170d/util/0.log" Jan 27 22:46:58 crc kubenswrapper[4793]: I0127 22:46:58.993518 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx_b691d932-0b30-4838-9566-a378435e170d/pull/0.log" Jan 27 22:46:59 crc kubenswrapper[4793]: I0127 22:46:59.047335 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hjbdx_b691d932-0b30-4838-9566-a378435e170d/extract/0.log" Jan 27 22:46:59 crc kubenswrapper[4793]: I0127 22:46:59.150198 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2z9hn_7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b/extract-utilities/0.log" Jan 27 22:46:59 crc kubenswrapper[4793]: I0127 22:46:59.337423 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2z9hn_7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b/extract-content/0.log" Jan 27 22:46:59 crc kubenswrapper[4793]: I0127 22:46:59.345719 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2z9hn_7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b/extract-utilities/0.log" Jan 27 22:46:59 crc kubenswrapper[4793]: I0127 22:46:59.377654 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2z9hn_7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b/extract-content/0.log" Jan 27 22:46:59 crc kubenswrapper[4793]: I0127 22:46:59.545053 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2z9hn_7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b/extract-content/0.log" Jan 27 22:46:59 crc kubenswrapper[4793]: I0127 22:46:59.579436 4793 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2z9hn_7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b/extract-utilities/0.log" Jan 27 22:46:59 crc kubenswrapper[4793]: I0127 22:46:59.802701 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:46:59 crc kubenswrapper[4793]: E0127 22:46:59.803254 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:46:59 crc kubenswrapper[4793]: I0127 22:46:59.836501 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6z2jf_722970b7-fdc7-44ab-a809-4e55d8ac772a/extract-utilities/0.log" Jan 27 22:47:00 crc kubenswrapper[4793]: I0127 22:47:00.067688 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6z2jf_722970b7-fdc7-44ab-a809-4e55d8ac772a/extract-content/0.log" Jan 27 22:47:00 crc kubenswrapper[4793]: I0127 22:47:00.076014 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6z2jf_722970b7-fdc7-44ab-a809-4e55d8ac772a/extract-content/0.log" Jan 27 22:47:00 crc kubenswrapper[4793]: I0127 22:47:00.142076 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6z2jf_722970b7-fdc7-44ab-a809-4e55d8ac772a/extract-utilities/0.log" Jan 27 22:47:00 crc kubenswrapper[4793]: I0127 22:47:00.632883 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6z2jf_722970b7-fdc7-44ab-a809-4e55d8ac772a/extract-content/0.log" Jan 27 22:47:00 crc kubenswrapper[4793]: I0127 22:47:00.679454 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6z2jf_722970b7-fdc7-44ab-a809-4e55d8ac772a/extract-utilities/0.log" Jan 27 22:47:00 crc kubenswrapper[4793]: I0127 22:47:00.722656 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2z9hn_7cc048be-c6e0-44b2-aed1-e1f20a5cbb8b/registry-server/0.log" Jan 27 22:47:00 crc kubenswrapper[4793]: I0127 22:47:00.905388 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-m7kkq_92d8b5ca-b574-449a-a93b-43722e02e624/marketplace-operator/0.log" Jan 27 22:47:01 crc kubenswrapper[4793]: I0127 22:47:01.160043 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-dndx6_b7e8d64f-44af-47e1-a656-34aace0833cf/extract-utilities/0.log" Jan 27 22:47:01 crc kubenswrapper[4793]: I0127 22:47:01.367973 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-dndx6_b7e8d64f-44af-47e1-a656-34aace0833cf/extract-utilities/0.log" Jan 27 22:47:01 crc kubenswrapper[4793]: I0127 22:47:01.472370 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-dndx6_b7e8d64f-44af-47e1-a656-34aace0833cf/extract-content/0.log" Jan 27 22:47:01 crc kubenswrapper[4793]: I0127 22:47:01.490297 4793 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-dndx6_b7e8d64f-44af-47e1-a656-34aace0833cf/extract-content/0.log" Jan 27 22:47:01 crc kubenswrapper[4793]: I0127 22:47:01.636046 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-dndx6_b7e8d64f-44af-47e1-a656-34aace0833cf/extract-utilities/0.log" Jan 27 22:47:01 crc kubenswrapper[4793]: I0127 22:47:01.644901 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-dndx6_b7e8d64f-44af-47e1-a656-34aace0833cf/extract-content/0.log" Jan 27 22:47:01 crc kubenswrapper[4793]: I0127 22:47:01.881876 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6vqft_48ec0076-1321-431f-8d4a-06ab47d87847/extract-utilities/0.log" Jan 27 22:47:02 crc kubenswrapper[4793]: I0127 22:47:02.111803 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6vqft_48ec0076-1321-431f-8d4a-06ab47d87847/extract-content/0.log" Jan 27 22:47:02 crc kubenswrapper[4793]: I0127 22:47:02.133398 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6vqft_48ec0076-1321-431f-8d4a-06ab47d87847/extract-content/0.log" Jan 27 22:47:02 crc kubenswrapper[4793]: I0127 22:47:02.139570 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6vqft_48ec0076-1321-431f-8d4a-06ab47d87847/extract-utilities/0.log" Jan 27 22:47:02 crc kubenswrapper[4793]: I0127 22:47:02.274630 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-dndx6_b7e8d64f-44af-47e1-a656-34aace0833cf/registry-server/0.log" Jan 27 22:47:02 crc kubenswrapper[4793]: I0127 22:47:02.298975 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6z2jf_722970b7-fdc7-44ab-a809-4e55d8ac772a/registry-server/0.log" Jan 27 22:47:02 crc kubenswrapper[4793]: I0127 22:47:02.331506 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6vqft_48ec0076-1321-431f-8d4a-06ab47d87847/extract-utilities/0.log" Jan 27 22:47:02 crc kubenswrapper[4793]: I0127 22:47:02.356484 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6vqft_48ec0076-1321-431f-8d4a-06ab47d87847/extract-content/0.log" Jan 27 22:47:03 crc kubenswrapper[4793]: I0127 22:47:03.516247 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6vqft_48ec0076-1321-431f-8d4a-06ab47d87847/registry-server/0.log" Jan 27 22:47:08 crc kubenswrapper[4793]: I0127 22:47:08.978795 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-z4wwg"] Jan 27 22:47:08 crc kubenswrapper[4793]: E0127 22:47:08.980157 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23289cee-e86d-49fe-9bd9-27a6f3b6ccaf" containerName="collect-profiles" Jan 27 22:47:08 crc kubenswrapper[4793]: I0127 22:47:08.980181 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="23289cee-e86d-49fe-9bd9-27a6f3b6ccaf" containerName="collect-profiles" Jan 27 22:47:08 crc kubenswrapper[4793]: I0127 22:47:08.980646 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="23289cee-e86d-49fe-9bd9-27a6f3b6ccaf" containerName="collect-profiles" Jan 27 22:47:08 crc kubenswrapper[4793]: I0127 22:47:08.983534 4793 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z4wwg" Jan 27 22:47:08 crc kubenswrapper[4793]: I0127 22:47:08.993387 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z4wwg"] Jan 27 22:47:09 crc kubenswrapper[4793]: I0127 22:47:09.097715 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7fc67d2-18d3-4649-8d44-728e4859ec79-catalog-content\") pod \"redhat-marketplace-z4wwg\" (UID: \"f7fc67d2-18d3-4649-8d44-728e4859ec79\") " pod="openshift-marketplace/redhat-marketplace-z4wwg" Jan 27 22:47:09 crc kubenswrapper[4793]: I0127 22:47:09.098122 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7fc67d2-18d3-4649-8d44-728e4859ec79-utilities\") pod \"redhat-marketplace-z4wwg\" (UID: \"f7fc67d2-18d3-4649-8d44-728e4859ec79\") " pod="openshift-marketplace/redhat-marketplace-z4wwg" Jan 27 22:47:09 crc kubenswrapper[4793]: I0127 22:47:09.098221 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6k2r\" (UniqueName: \"kubernetes.io/projected/f7fc67d2-18d3-4649-8d44-728e4859ec79-kube-api-access-g6k2r\") pod \"redhat-marketplace-z4wwg\" (UID: \"f7fc67d2-18d3-4649-8d44-728e4859ec79\") " pod="openshift-marketplace/redhat-marketplace-z4wwg" Jan 27 22:47:09 crc kubenswrapper[4793]: I0127 22:47:09.284303 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7fc67d2-18d3-4649-8d44-728e4859ec79-catalog-content\") pod \"redhat-marketplace-z4wwg\" (UID: \"f7fc67d2-18d3-4649-8d44-728e4859ec79\") " pod="openshift-marketplace/redhat-marketplace-z4wwg" Jan 27 22:47:09 crc kubenswrapper[4793]: I0127 22:47:09.284468 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7fc67d2-18d3-4649-8d44-728e4859ec79-utilities\") pod \"redhat-marketplace-z4wwg\" (UID: \"f7fc67d2-18d3-4649-8d44-728e4859ec79\") " pod="openshift-marketplace/redhat-marketplace-z4wwg" Jan 27 22:47:09 crc kubenswrapper[4793]: I0127 22:47:09.284497 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6k2r\" (UniqueName: \"kubernetes.io/projected/f7fc67d2-18d3-4649-8d44-728e4859ec79-kube-api-access-g6k2r\") pod \"redhat-marketplace-z4wwg\" (UID: \"f7fc67d2-18d3-4649-8d44-728e4859ec79\") " pod="openshift-marketplace/redhat-marketplace-z4wwg" Jan 27 22:47:09 crc kubenswrapper[4793]: I0127 22:47:09.294292 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7fc67d2-18d3-4649-8d44-728e4859ec79-catalog-content\") pod \"redhat-marketplace-z4wwg\" (UID: \"f7fc67d2-18d3-4649-8d44-728e4859ec79\") " pod="openshift-marketplace/redhat-marketplace-z4wwg" Jan 27 22:47:09 crc kubenswrapper[4793]: I0127 22:47:09.295249 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7fc67d2-18d3-4649-8d44-728e4859ec79-utilities\") pod \"redhat-marketplace-z4wwg\" (UID: \"f7fc67d2-18d3-4649-8d44-728e4859ec79\") " pod="openshift-marketplace/redhat-marketplace-z4wwg" Jan 27 22:47:09 crc kubenswrapper[4793]: I0127 22:47:09.333687 4793 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6k2r\" (UniqueName: \"kubernetes.io/projected/f7fc67d2-18d3-4649-8d44-728e4859ec79-kube-api-access-g6k2r\") pod \"redhat-marketplace-z4wwg\" (UID: \"f7fc67d2-18d3-4649-8d44-728e4859ec79\") " pod="openshift-marketplace/redhat-marketplace-z4wwg" Jan 27 22:47:09 crc kubenswrapper[4793]: I0127 22:47:09.603997 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z4wwg" Jan 27 22:47:10 crc kubenswrapper[4793]: I0127 22:47:10.127358 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z4wwg"] Jan 27 22:47:10 crc kubenswrapper[4793]: I0127 22:47:10.309484 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4wwg" event={"ID":"f7fc67d2-18d3-4649-8d44-728e4859ec79","Type":"ContainerStarted","Data":"1f7bd34e637643a64a681ef775ea469c321144cfd8a995008c68f4325e4b6fb4"} Jan 27 22:47:11 crc kubenswrapper[4793]: I0127 22:47:11.323622 4793 generic.go:334] "Generic (PLEG): container finished" podID="f7fc67d2-18d3-4649-8d44-728e4859ec79" containerID="01a4c250779abd02d706b1dc16709dace8169e1238dff658d74af65862af8069" exitCode=0 Jan 27 22:47:11 crc kubenswrapper[4793]: I0127 22:47:11.323712 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4wwg" event={"ID":"f7fc67d2-18d3-4649-8d44-728e4859ec79","Type":"ContainerDied","Data":"01a4c250779abd02d706b1dc16709dace8169e1238dff658d74af65862af8069"} Jan 27 22:47:11 crc kubenswrapper[4793]: I0127 22:47:11.326801 4793 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 22:47:12 crc kubenswrapper[4793]: I0127 22:47:12.807837 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027" Jan 27 22:47:12 crc kubenswrapper[4793]: E0127 22:47:12.808322 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:47:12 crc kubenswrapper[4793]: I0127 22:47:12.808590 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:47:12 crc kubenswrapper[4793]: E0127 22:47:12.808901 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:47:17 crc kubenswrapper[4793]: I0127 22:47:17.405226 4793 generic.go:334] "Generic (PLEG): container finished" podID="f7fc67d2-18d3-4649-8d44-728e4859ec79" containerID="3aa6caa7f67bafc9aba2dc7c79f8e80ef35b7af801ffdb033ff0988fea515490" exitCode=0 Jan 27 22:47:17 crc kubenswrapper[4793]: I0127 22:47:17.405272 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4wwg" 
event={"ID":"f7fc67d2-18d3-4649-8d44-728e4859ec79","Type":"ContainerDied","Data":"3aa6caa7f67bafc9aba2dc7c79f8e80ef35b7af801ffdb033ff0988fea515490"} Jan 27 22:47:18 crc kubenswrapper[4793]: I0127 22:47:18.420297 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4wwg" event={"ID":"f7fc67d2-18d3-4649-8d44-728e4859ec79","Type":"ContainerStarted","Data":"f7aae3308c0ba6fe6a2bd78484709adeb4050713b0d0eadb6b8231d4055aaac2"} Jan 27 22:47:18 crc kubenswrapper[4793]: I0127 22:47:18.457799 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-z4wwg" podStartSLOduration=4.001092023 podStartE2EDuration="10.457779472s" podCreationTimestamp="2026-01-27 22:47:08 +0000 UTC" firstStartedPulling="2026-01-27 22:47:11.326516442 +0000 UTC m=+9856.716769598" lastFinishedPulling="2026-01-27 22:47:17.783203891 +0000 UTC m=+9863.173457047" observedRunningTime="2026-01-27 22:47:18.449976595 +0000 UTC m=+9863.840229741" watchObservedRunningTime="2026-01-27 22:47:18.457779472 +0000 UTC m=+9863.848032628" Jan 27 22:47:19 crc kubenswrapper[4793]: I0127 22:47:19.604832 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-z4wwg" Jan 27 22:47:19 crc kubenswrapper[4793]: I0127 22:47:19.605215 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-z4wwg" Jan 27 22:47:20 crc kubenswrapper[4793]: I0127 22:47:20.664977 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-z4wwg" podUID="f7fc67d2-18d3-4649-8d44-728e4859ec79" containerName="registry-server" probeResult="failure" output=< Jan 27 22:47:20 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s Jan 27 22:47:20 crc kubenswrapper[4793]: > Jan 27 22:47:21 crc kubenswrapper[4793]: I0127 22:47:21.619085 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-767f7dcbf-zhcbh_cdf9943f-a502-4e73-bbb7-5b638de02443/prometheus-operator-admission-webhook/0.log" Jan 27 22:47:21 crc kubenswrapper[4793]: I0127 22:47:21.629275 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-767f7dcbf-qfb59_15c11c1c-e768-4e99-ac32-157dbd118043/prometheus-operator-admission-webhook/0.log" Jan 27 22:47:21 crc kubenswrapper[4793]: I0127 22:47:21.647664 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-557c7_d778ab64-052f-4ffb-a80d-52d1807a499c/prometheus-operator/0.log" Jan 27 22:47:21 crc kubenswrapper[4793]: I0127 22:47:21.840688 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-x6b5g_5f176291-9b04-49fc-ad48-fe1552b2bcaf/perses-operator/0.log" Jan 27 22:47:21 crc kubenswrapper[4793]: I0127 22:47:21.891006 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-ml85d_b58cf118-0ae5-43f3-bf3c-f5f01eb636ba/operator/0.log" Jan 27 22:47:23 crc kubenswrapper[4793]: I0127 22:47:23.803829 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027" Jan 27 22:47:23 crc kubenswrapper[4793]: E0127 22:47:23.804646 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:47:26 crc kubenswrapper[4793]: I0127 22:47:26.804422 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:47:26 crc kubenswrapper[4793]: E0127 22:47:26.805118 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:47:29 crc kubenswrapper[4793]: I0127 22:47:29.832692 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-z4wwg" Jan 27 22:47:29 crc kubenswrapper[4793]: I0127 22:47:29.893466 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-z4wwg" Jan 27 22:47:30 crc kubenswrapper[4793]: I0127 22:47:30.884047 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z4wwg"] Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.063172 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dndx6"] Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.063669 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dndx6" podUID="b7e8d64f-44af-47e1-a656-34aace0833cf" containerName="registry-server" containerID="cri-o://1f40e08a726f68206d0107e8701792e9602bbbfc22062e42703b9a535de60ea3" gracePeriod=2 Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.597087 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dndx6" Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.797289 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7e8d64f-44af-47e1-a656-34aace0833cf-catalog-content\") pod \"b7e8d64f-44af-47e1-a656-34aace0833cf\" (UID: \"b7e8d64f-44af-47e1-a656-34aace0833cf\") " Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.797848 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7e8d64f-44af-47e1-a656-34aace0833cf-utilities\") pod \"b7e8d64f-44af-47e1-a656-34aace0833cf\" (UID: \"b7e8d64f-44af-47e1-a656-34aace0833cf\") " Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.797909 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llsqz\" (UniqueName: \"kubernetes.io/projected/b7e8d64f-44af-47e1-a656-34aace0833cf-kube-api-access-llsqz\") pod \"b7e8d64f-44af-47e1-a656-34aace0833cf\" (UID: \"b7e8d64f-44af-47e1-a656-34aace0833cf\") " Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.798474 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7e8d64f-44af-47e1-a656-34aace0833cf-utilities" (OuterVolumeSpecName: "utilities") pod "b7e8d64f-44af-47e1-a656-34aace0833cf" (UID: "b7e8d64f-44af-47e1-a656-34aace0833cf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.800064 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7e8d64f-44af-47e1-a656-34aace0833cf-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.800053 4793 generic.go:334] "Generic (PLEG): container finished" podID="b7e8d64f-44af-47e1-a656-34aace0833cf" containerID="1f40e08a726f68206d0107e8701792e9602bbbfc22062e42703b9a535de60ea3" exitCode=0 Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.800098 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dndx6" event={"ID":"b7e8d64f-44af-47e1-a656-34aace0833cf","Type":"ContainerDied","Data":"1f40e08a726f68206d0107e8701792e9602bbbfc22062e42703b9a535de60ea3"} Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.800139 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dndx6" event={"ID":"b7e8d64f-44af-47e1-a656-34aace0833cf","Type":"ContainerDied","Data":"8f6933c5c59e2cf118dd869edec9538a2ead232103d9c1dc0140d7b49563eaf2"} Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.800160 4793 scope.go:117] "RemoveContainer" containerID="1f40e08a726f68206d0107e8701792e9602bbbfc22062e42703b9a535de60ea3" Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.801200 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dndx6" Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.805919 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7e8d64f-44af-47e1-a656-34aace0833cf-kube-api-access-llsqz" (OuterVolumeSpecName: "kube-api-access-llsqz") pod "b7e8d64f-44af-47e1-a656-34aace0833cf" (UID: "b7e8d64f-44af-47e1-a656-34aace0833cf"). InnerVolumeSpecName "kube-api-access-llsqz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.831448 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7e8d64f-44af-47e1-a656-34aace0833cf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b7e8d64f-44af-47e1-a656-34aace0833cf" (UID: "b7e8d64f-44af-47e1-a656-34aace0833cf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.897030 4793 scope.go:117] "RemoveContainer" containerID="e83115be42f7b6b6e374d33e41810f29566106bc050534af643f2c97fd6d392f" Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.903223 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7e8d64f-44af-47e1-a656-34aace0833cf-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.903249 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llsqz\" (UniqueName: \"kubernetes.io/projected/b7e8d64f-44af-47e1-a656-34aace0833cf-kube-api-access-llsqz\") on node \"crc\" DevicePath \"\"" Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.935542 4793 scope.go:117] "RemoveContainer" containerID="95a17cc9b1a398ec3ad88daf7b172a64c308e2b37168c1a61e577b4d39c2b3d5" Jan 27 22:47:31 crc kubenswrapper[4793]: I0127 22:47:31.982729 4793 scope.go:117] "RemoveContainer" containerID="1f40e08a726f68206d0107e8701792e9602bbbfc22062e42703b9a535de60ea3" Jan 27 22:47:32 crc kubenswrapper[4793]: E0127 22:47:32.279904 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f40e08a726f68206d0107e8701792e9602bbbfc22062e42703b9a535de60ea3\": container with ID starting with 1f40e08a726f68206d0107e8701792e9602bbbfc22062e42703b9a535de60ea3 not found: ID does not exist" containerID="1f40e08a726f68206d0107e8701792e9602bbbfc22062e42703b9a535de60ea3" Jan 27 22:47:32 crc kubenswrapper[4793]: I0127 22:47:32.279974 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f40e08a726f68206d0107e8701792e9602bbbfc22062e42703b9a535de60ea3"} err="failed to get container status \"1f40e08a726f68206d0107e8701792e9602bbbfc22062e42703b9a535de60ea3\": rpc error: code = NotFound desc = could not find container \"1f40e08a726f68206d0107e8701792e9602bbbfc22062e42703b9a535de60ea3\": container with ID starting with 1f40e08a726f68206d0107e8701792e9602bbbfc22062e42703b9a535de60ea3 not found: ID does not exist" Jan 27 22:47:32 crc kubenswrapper[4793]: I0127 22:47:32.280010 4793 scope.go:117] "RemoveContainer" containerID="e83115be42f7b6b6e374d33e41810f29566106bc050534af643f2c97fd6d392f" Jan 27 22:47:32 crc kubenswrapper[4793]: E0127 22:47:32.282579 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e83115be42f7b6b6e374d33e41810f29566106bc050534af643f2c97fd6d392f\": container with ID starting with e83115be42f7b6b6e374d33e41810f29566106bc050534af643f2c97fd6d392f not found: ID does not exist" containerID="e83115be42f7b6b6e374d33e41810f29566106bc050534af643f2c97fd6d392f" Jan 27 22:47:32 crc kubenswrapper[4793]: I0127 22:47:32.282607 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e83115be42f7b6b6e374d33e41810f29566106bc050534af643f2c97fd6d392f"} err="failed to get container 
status \"e83115be42f7b6b6e374d33e41810f29566106bc050534af643f2c97fd6d392f\": rpc error: code = NotFound desc = could not find container \"e83115be42f7b6b6e374d33e41810f29566106bc050534af643f2c97fd6d392f\": container with ID starting with e83115be42f7b6b6e374d33e41810f29566106bc050534af643f2c97fd6d392f not found: ID does not exist" Jan 27 22:47:32 crc kubenswrapper[4793]: I0127 22:47:32.282625 4793 scope.go:117] "RemoveContainer" containerID="95a17cc9b1a398ec3ad88daf7b172a64c308e2b37168c1a61e577b4d39c2b3d5" Jan 27 22:47:32 crc kubenswrapper[4793]: E0127 22:47:32.289914 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95a17cc9b1a398ec3ad88daf7b172a64c308e2b37168c1a61e577b4d39c2b3d5\": container with ID starting with 95a17cc9b1a398ec3ad88daf7b172a64c308e2b37168c1a61e577b4d39c2b3d5 not found: ID does not exist" containerID="95a17cc9b1a398ec3ad88daf7b172a64c308e2b37168c1a61e577b4d39c2b3d5" Jan 27 22:47:32 crc kubenswrapper[4793]: I0127 22:47:32.289958 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95a17cc9b1a398ec3ad88daf7b172a64c308e2b37168c1a61e577b4d39c2b3d5"} err="failed to get container status \"95a17cc9b1a398ec3ad88daf7b172a64c308e2b37168c1a61e577b4d39c2b3d5\": rpc error: code = NotFound desc = could not find container \"95a17cc9b1a398ec3ad88daf7b172a64c308e2b37168c1a61e577b4d39c2b3d5\": container with ID starting with 95a17cc9b1a398ec3ad88daf7b172a64c308e2b37168c1a61e577b4d39c2b3d5 not found: ID does not exist" Jan 27 22:47:32 crc kubenswrapper[4793]: I0127 22:47:32.356651 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dndx6"] Jan 27 22:47:32 crc kubenswrapper[4793]: I0127 22:47:32.367193 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dndx6"] Jan 27 22:47:33 crc kubenswrapper[4793]: I0127 22:47:33.813411 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7e8d64f-44af-47e1-a656-34aace0833cf" path="/var/lib/kubelet/pods/b7e8d64f-44af-47e1-a656-34aace0833cf/volumes" Jan 27 22:47:34 crc kubenswrapper[4793]: I0127 22:47:34.802986 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027" Jan 27 22:47:34 crc kubenswrapper[4793]: E0127 22:47:34.803657 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:47:38 crc kubenswrapper[4793]: I0127 22:47:38.803772 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:47:38 crc kubenswrapper[4793]: E0127 22:47:38.804716 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:47:47 crc kubenswrapper[4793]: I0127 22:47:47.803323 4793 scope.go:117] "RemoveContainer" 
containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027" Jan 27 22:47:47 crc kubenswrapper[4793]: E0127 22:47:47.804027 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" Jan 27 22:47:51 crc kubenswrapper[4793]: I0127 22:47:51.803495 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:47:51 crc kubenswrapper[4793]: E0127 22:47:51.804262 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:47:59 crc kubenswrapper[4793]: I0127 22:47:59.803988 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027" Jan 27 22:48:01 crc kubenswrapper[4793]: I0127 22:48:01.243198 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"02811de85cd142064d9049f5bfecb2ab0a0bcf3e4487829503c2ea5729c08640"} Jan 27 22:48:02 crc kubenswrapper[4793]: I0127 22:48:02.924618 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:48:02 crc kubenswrapper[4793]: E0127 22:48:02.925337 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:48:16 crc kubenswrapper[4793]: I0127 22:48:16.803844 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:48:16 crc kubenswrapper[4793]: E0127 22:48:16.804941 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:48:30 crc kubenswrapper[4793]: I0127 22:48:30.804473 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:48:30 crc kubenswrapper[4793]: E0127 22:48:30.805344 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:48:40 crc kubenswrapper[4793]: I0127 22:48:40.243030 4793 scope.go:117] 
"RemoveContainer" containerID="d902aeed4c051abacbb2ae03a374b91aa497243fc85f7393148e0022f704bd54" Jan 27 22:48:42 crc kubenswrapper[4793]: I0127 22:48:42.803200 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:48:42 crc kubenswrapper[4793]: E0127 22:48:42.804061 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:48:57 crc kubenswrapper[4793]: I0127 22:48:57.803612 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:48:57 crc kubenswrapper[4793]: E0127 22:48:57.805449 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:49:10 crc kubenswrapper[4793]: I0127 22:49:10.803288 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:49:10 crc kubenswrapper[4793]: E0127 22:49:10.804172 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:49:24 crc kubenswrapper[4793]: I0127 22:49:24.805678 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:49:24 crc kubenswrapper[4793]: E0127 22:49:24.808182 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.096995 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8jvw9"] Jan 27 22:49:27 crc kubenswrapper[4793]: E0127 22:49:27.099179 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7e8d64f-44af-47e1-a656-34aace0833cf" containerName="extract-content" Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.099218 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7e8d64f-44af-47e1-a656-34aace0833cf" containerName="extract-content" Jan 27 22:49:27 crc kubenswrapper[4793]: E0127 22:49:27.099250 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7e8d64f-44af-47e1-a656-34aace0833cf" containerName="registry-server" Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.099262 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7e8d64f-44af-47e1-a656-34aace0833cf" containerName="registry-server" Jan 27 22:49:27 crc kubenswrapper[4793]: E0127 22:49:27.099311 4793 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="b7e8d64f-44af-47e1-a656-34aace0833cf" containerName="extract-utilities" Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.099324 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7e8d64f-44af-47e1-a656-34aace0833cf" containerName="extract-utilities" Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.099799 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7e8d64f-44af-47e1-a656-34aace0833cf" containerName="registry-server" Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.137457 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8jvw9"] Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.137600 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.295620 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb45d505-26cd-4451-9f4c-737b44bf987a-catalog-content\") pod \"community-operators-8jvw9\" (UID: \"cb45d505-26cd-4451-9f4c-737b44bf987a\") " pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.295738 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb45d505-26cd-4451-9f4c-737b44bf987a-utilities\") pod \"community-operators-8jvw9\" (UID: \"cb45d505-26cd-4451-9f4c-737b44bf987a\") " pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.295790 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pn49l\" (UniqueName: \"kubernetes.io/projected/cb45d505-26cd-4451-9f4c-737b44bf987a-kube-api-access-pn49l\") pod \"community-operators-8jvw9\" (UID: \"cb45d505-26cd-4451-9f4c-737b44bf987a\") " pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.397539 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb45d505-26cd-4451-9f4c-737b44bf987a-utilities\") pod \"community-operators-8jvw9\" (UID: \"cb45d505-26cd-4451-9f4c-737b44bf987a\") " pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.397652 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pn49l\" (UniqueName: \"kubernetes.io/projected/cb45d505-26cd-4451-9f4c-737b44bf987a-kube-api-access-pn49l\") pod \"community-operators-8jvw9\" (UID: \"cb45d505-26cd-4451-9f4c-737b44bf987a\") " pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.397781 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb45d505-26cd-4451-9f4c-737b44bf987a-catalog-content\") pod \"community-operators-8jvw9\" (UID: \"cb45d505-26cd-4451-9f4c-737b44bf987a\") " pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.398236 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb45d505-26cd-4451-9f4c-737b44bf987a-catalog-content\") pod 
\"community-operators-8jvw9\" (UID: \"cb45d505-26cd-4451-9f4c-737b44bf987a\") " pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.398461 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb45d505-26cd-4451-9f4c-737b44bf987a-utilities\") pod \"community-operators-8jvw9\" (UID: \"cb45d505-26cd-4451-9f4c-737b44bf987a\") " pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.433161 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pn49l\" (UniqueName: \"kubernetes.io/projected/cb45d505-26cd-4451-9f4c-737b44bf987a-kube-api-access-pn49l\") pod \"community-operators-8jvw9\" (UID: \"cb45d505-26cd-4451-9f4c-737b44bf987a\") " pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:27 crc kubenswrapper[4793]: I0127 22:49:27.473292 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:28 crc kubenswrapper[4793]: I0127 22:49:28.098713 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8jvw9"] Jan 27 22:49:28 crc kubenswrapper[4793]: I0127 22:49:28.827043 4793 generic.go:334] "Generic (PLEG): container finished" podID="cb45d505-26cd-4451-9f4c-737b44bf987a" containerID="9bc54f2b4c242933a2308c697ad4d0747180fe0cb4cc36f651ceaaa7d3b0e090" exitCode=0 Jan 27 22:49:28 crc kubenswrapper[4793]: I0127 22:49:28.827096 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jvw9" event={"ID":"cb45d505-26cd-4451-9f4c-737b44bf987a","Type":"ContainerDied","Data":"9bc54f2b4c242933a2308c697ad4d0747180fe0cb4cc36f651ceaaa7d3b0e090"} Jan 27 22:49:28 crc kubenswrapper[4793]: I0127 22:49:28.827460 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jvw9" event={"ID":"cb45d505-26cd-4451-9f4c-737b44bf987a","Type":"ContainerStarted","Data":"da9eac6202e40355deedf4227963ed2b84313eca0682b92084a1a98d5129f620"} Jan 27 22:49:29 crc kubenswrapper[4793]: I0127 22:49:29.869248 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jvw9" event={"ID":"cb45d505-26cd-4451-9f4c-737b44bf987a","Type":"ContainerStarted","Data":"181a1cda8b7e5390aca24332febe389fb8706842dfd0c06854e8da99253bc55d"} Jan 27 22:49:31 crc kubenswrapper[4793]: I0127 22:49:31.901957 4793 generic.go:334] "Generic (PLEG): container finished" podID="cb45d505-26cd-4451-9f4c-737b44bf987a" containerID="181a1cda8b7e5390aca24332febe389fb8706842dfd0c06854e8da99253bc55d" exitCode=0 Jan 27 22:49:31 crc kubenswrapper[4793]: I0127 22:49:31.902236 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jvw9" event={"ID":"cb45d505-26cd-4451-9f4c-737b44bf987a","Type":"ContainerDied","Data":"181a1cda8b7e5390aca24332febe389fb8706842dfd0c06854e8da99253bc55d"} Jan 27 22:49:32 crc kubenswrapper[4793]: I0127 22:49:32.916405 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jvw9" event={"ID":"cb45d505-26cd-4451-9f4c-737b44bf987a","Type":"ContainerStarted","Data":"65dd7dc71e353c6b11d911c9988030715d283ff6f21ea81c971dc56edc47bcc0"} Jan 27 22:49:32 crc kubenswrapper[4793]: I0127 22:49:32.944689 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/community-operators-8jvw9" podStartSLOduration=2.391596738 podStartE2EDuration="5.944661561s" podCreationTimestamp="2026-01-27 22:49:27 +0000 UTC" firstStartedPulling="2026-01-27 22:49:28.831239355 +0000 UTC m=+9994.221492551" lastFinishedPulling="2026-01-27 22:49:32.384304178 +0000 UTC m=+9997.774557374" observedRunningTime="2026-01-27 22:49:32.938463912 +0000 UTC m=+9998.328717078" watchObservedRunningTime="2026-01-27 22:49:32.944661561 +0000 UTC m=+9998.334914727" Jan 27 22:49:34 crc kubenswrapper[4793]: I0127 22:49:34.940229 4793 generic.go:334] "Generic (PLEG): container finished" podID="b8f70002-f5f1-4e33-99ec-385dcde56935" containerID="59d4a95a174bc168546cae3d2f99065a85ea8530df7b05d7ad6a5561f4dc4841" exitCode=0 Jan 27 22:49:34 crc kubenswrapper[4793]: I0127 22:49:34.940456 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-s64mx/must-gather-bsnf7" event={"ID":"b8f70002-f5f1-4e33-99ec-385dcde56935","Type":"ContainerDied","Data":"59d4a95a174bc168546cae3d2f99065a85ea8530df7b05d7ad6a5561f4dc4841"} Jan 27 22:49:34 crc kubenswrapper[4793]: I0127 22:49:34.942307 4793 scope.go:117] "RemoveContainer" containerID="59d4a95a174bc168546cae3d2f99065a85ea8530df7b05d7ad6a5561f4dc4841" Jan 27 22:49:35 crc kubenswrapper[4793]: I0127 22:49:35.399728 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-s64mx_must-gather-bsnf7_b8f70002-f5f1-4e33-99ec-385dcde56935/gather/0.log" Jan 27 22:49:36 crc kubenswrapper[4793]: I0127 22:49:36.804460 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:49:36 crc kubenswrapper[4793]: E0127 22:49:36.805768 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:49:37 crc kubenswrapper[4793]: I0127 22:49:37.474668 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:37 crc kubenswrapper[4793]: I0127 22:49:37.474728 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:37 crc kubenswrapper[4793]: I0127 22:49:37.540948 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:38 crc kubenswrapper[4793]: I0127 22:49:38.062807 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:38 crc kubenswrapper[4793]: I0127 22:49:38.129464 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8jvw9"] Jan 27 22:49:40 crc kubenswrapper[4793]: I0127 22:49:40.026297 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8jvw9" podUID="cb45d505-26cd-4451-9f4c-737b44bf987a" containerName="registry-server" containerID="cri-o://65dd7dc71e353c6b11d911c9988030715d283ff6f21ea81c971dc56edc47bcc0" gracePeriod=2 Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:40.375921 4793 scope.go:117] "RemoveContainer" containerID="822d1de0d05633f587fb5775856608b265ebe3856f0f9e876e68383dbf58f542" 
Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:40.582111 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:40.731049 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pn49l\" (UniqueName: \"kubernetes.io/projected/cb45d505-26cd-4451-9f4c-737b44bf987a-kube-api-access-pn49l\") pod \"cb45d505-26cd-4451-9f4c-737b44bf987a\" (UID: \"cb45d505-26cd-4451-9f4c-737b44bf987a\") " Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:40.731345 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb45d505-26cd-4451-9f4c-737b44bf987a-catalog-content\") pod \"cb45d505-26cd-4451-9f4c-737b44bf987a\" (UID: \"cb45d505-26cd-4451-9f4c-737b44bf987a\") " Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:40.731438 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb45d505-26cd-4451-9f4c-737b44bf987a-utilities\") pod \"cb45d505-26cd-4451-9f4c-737b44bf987a\" (UID: \"cb45d505-26cd-4451-9f4c-737b44bf987a\") " Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:40.733050 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb45d505-26cd-4451-9f4c-737b44bf987a-utilities" (OuterVolumeSpecName: "utilities") pod "cb45d505-26cd-4451-9f4c-737b44bf987a" (UID: "cb45d505-26cd-4451-9f4c-737b44bf987a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:40.741014 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb45d505-26cd-4451-9f4c-737b44bf987a-kube-api-access-pn49l" (OuterVolumeSpecName: "kube-api-access-pn49l") pod "cb45d505-26cd-4451-9f4c-737b44bf987a" (UID: "cb45d505-26cd-4451-9f4c-737b44bf987a"). InnerVolumeSpecName "kube-api-access-pn49l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:40.785106 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb45d505-26cd-4451-9f4c-737b44bf987a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cb45d505-26cd-4451-9f4c-737b44bf987a" (UID: "cb45d505-26cd-4451-9f4c-737b44bf987a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:40.834099 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pn49l\" (UniqueName: \"kubernetes.io/projected/cb45d505-26cd-4451-9f4c-737b44bf987a-kube-api-access-pn49l\") on node \"crc\" DevicePath \"\"" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:40.834135 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb45d505-26cd-4451-9f4c-737b44bf987a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:40.834155 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb45d505-26cd-4451-9f4c-737b44bf987a-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:41.045959 4793 generic.go:334] "Generic (PLEG): container finished" podID="cb45d505-26cd-4451-9f4c-737b44bf987a" containerID="65dd7dc71e353c6b11d911c9988030715d283ff6f21ea81c971dc56edc47bcc0" exitCode=0 Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:41.046024 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8jvw9" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:41.046045 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jvw9" event={"ID":"cb45d505-26cd-4451-9f4c-737b44bf987a","Type":"ContainerDied","Data":"65dd7dc71e353c6b11d911c9988030715d283ff6f21ea81c971dc56edc47bcc0"} Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:41.050026 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8jvw9" event={"ID":"cb45d505-26cd-4451-9f4c-737b44bf987a","Type":"ContainerDied","Data":"da9eac6202e40355deedf4227963ed2b84313eca0682b92084a1a98d5129f620"} Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:41.050060 4793 scope.go:117] "RemoveContainer" containerID="65dd7dc71e353c6b11d911c9988030715d283ff6f21ea81c971dc56edc47bcc0" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:41.088341 4793 scope.go:117] "RemoveContainer" containerID="181a1cda8b7e5390aca24332febe389fb8706842dfd0c06854e8da99253bc55d" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:41.098211 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8jvw9"] Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:41.111972 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8jvw9"] Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:41.112990 4793 scope.go:117] "RemoveContainer" containerID="9bc54f2b4c242933a2308c697ad4d0747180fe0cb4cc36f651ceaaa7d3b0e090" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:41.135343 4793 scope.go:117] "RemoveContainer" containerID="65dd7dc71e353c6b11d911c9988030715d283ff6f21ea81c971dc56edc47bcc0" Jan 27 22:49:41 crc kubenswrapper[4793]: E0127 22:49:41.135863 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65dd7dc71e353c6b11d911c9988030715d283ff6f21ea81c971dc56edc47bcc0\": container with ID starting with 65dd7dc71e353c6b11d911c9988030715d283ff6f21ea81c971dc56edc47bcc0 not found: ID does not exist" containerID="65dd7dc71e353c6b11d911c9988030715d283ff6f21ea81c971dc56edc47bcc0" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:41.135961 
4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65dd7dc71e353c6b11d911c9988030715d283ff6f21ea81c971dc56edc47bcc0"} err="failed to get container status \"65dd7dc71e353c6b11d911c9988030715d283ff6f21ea81c971dc56edc47bcc0\": rpc error: code = NotFound desc = could not find container \"65dd7dc71e353c6b11d911c9988030715d283ff6f21ea81c971dc56edc47bcc0\": container with ID starting with 65dd7dc71e353c6b11d911c9988030715d283ff6f21ea81c971dc56edc47bcc0 not found: ID does not exist" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:41.136001 4793 scope.go:117] "RemoveContainer" containerID="181a1cda8b7e5390aca24332febe389fb8706842dfd0c06854e8da99253bc55d" Jan 27 22:49:41 crc kubenswrapper[4793]: E0127 22:49:41.136334 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"181a1cda8b7e5390aca24332febe389fb8706842dfd0c06854e8da99253bc55d\": container with ID starting with 181a1cda8b7e5390aca24332febe389fb8706842dfd0c06854e8da99253bc55d not found: ID does not exist" containerID="181a1cda8b7e5390aca24332febe389fb8706842dfd0c06854e8da99253bc55d" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:41.136368 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"181a1cda8b7e5390aca24332febe389fb8706842dfd0c06854e8da99253bc55d"} err="failed to get container status \"181a1cda8b7e5390aca24332febe389fb8706842dfd0c06854e8da99253bc55d\": rpc error: code = NotFound desc = could not find container \"181a1cda8b7e5390aca24332febe389fb8706842dfd0c06854e8da99253bc55d\": container with ID starting with 181a1cda8b7e5390aca24332febe389fb8706842dfd0c06854e8da99253bc55d not found: ID does not exist" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:41.136390 4793 scope.go:117] "RemoveContainer" containerID="9bc54f2b4c242933a2308c697ad4d0747180fe0cb4cc36f651ceaaa7d3b0e090" Jan 27 22:49:41 crc kubenswrapper[4793]: E0127 22:49:41.136659 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9bc54f2b4c242933a2308c697ad4d0747180fe0cb4cc36f651ceaaa7d3b0e090\": container with ID starting with 9bc54f2b4c242933a2308c697ad4d0747180fe0cb4cc36f651ceaaa7d3b0e090 not found: ID does not exist" containerID="9bc54f2b4c242933a2308c697ad4d0747180fe0cb4cc36f651ceaaa7d3b0e090" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:41.136693 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9bc54f2b4c242933a2308c697ad4d0747180fe0cb4cc36f651ceaaa7d3b0e090"} err="failed to get container status \"9bc54f2b4c242933a2308c697ad4d0747180fe0cb4cc36f651ceaaa7d3b0e090\": rpc error: code = NotFound desc = could not find container \"9bc54f2b4c242933a2308c697ad4d0747180fe0cb4cc36f651ceaaa7d3b0e090\": container with ID starting with 9bc54f2b4c242933a2308c697ad4d0747180fe0cb4cc36f651ceaaa7d3b0e090 not found: ID does not exist" Jan 27 22:49:41 crc kubenswrapper[4793]: I0127 22:49:41.814841 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb45d505-26cd-4451-9f4c-737b44bf987a" path="/var/lib/kubelet/pods/cb45d505-26cd-4451-9f4c-737b44bf987a/volumes" Jan 27 22:49:46 crc kubenswrapper[4793]: I0127 22:49:46.499715 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-s64mx/must-gather-bsnf7"] Jan 27 22:49:46 crc kubenswrapper[4793]: I0127 22:49:46.500626 4793 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openshift-must-gather-s64mx/must-gather-bsnf7" podUID="b8f70002-f5f1-4e33-99ec-385dcde56935" containerName="copy" containerID="cri-o://fe7b4fda3fa02a8b22b2e634afb961df5ef167ab15e3cf63352c3712affc1c2e" gracePeriod=2 Jan 27 22:49:46 crc kubenswrapper[4793]: I0127 22:49:46.508469 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-s64mx/must-gather-bsnf7"] Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.024952 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-s64mx_must-gather-bsnf7_b8f70002-f5f1-4e33-99ec-385dcde56935/copy/0.log" Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.025632 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-s64mx/must-gather-bsnf7" Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.100150 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cplnr\" (UniqueName: \"kubernetes.io/projected/b8f70002-f5f1-4e33-99ec-385dcde56935-kube-api-access-cplnr\") pod \"b8f70002-f5f1-4e33-99ec-385dcde56935\" (UID: \"b8f70002-f5f1-4e33-99ec-385dcde56935\") " Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.100637 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b8f70002-f5f1-4e33-99ec-385dcde56935-must-gather-output\") pod \"b8f70002-f5f1-4e33-99ec-385dcde56935\" (UID: \"b8f70002-f5f1-4e33-99ec-385dcde56935\") " Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.108931 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8f70002-f5f1-4e33-99ec-385dcde56935-kube-api-access-cplnr" (OuterVolumeSpecName: "kube-api-access-cplnr") pod "b8f70002-f5f1-4e33-99ec-385dcde56935" (UID: "b8f70002-f5f1-4e33-99ec-385dcde56935"). InnerVolumeSpecName "kube-api-access-cplnr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.110026 4793 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-s64mx_must-gather-bsnf7_b8f70002-f5f1-4e33-99ec-385dcde56935/copy/0.log" Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.110406 4793 generic.go:334] "Generic (PLEG): container finished" podID="b8f70002-f5f1-4e33-99ec-385dcde56935" containerID="fe7b4fda3fa02a8b22b2e634afb961df5ef167ab15e3cf63352c3712affc1c2e" exitCode=143 Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.110461 4793 scope.go:117] "RemoveContainer" containerID="fe7b4fda3fa02a8b22b2e634afb961df5ef167ab15e3cf63352c3712affc1c2e" Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.110493 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-s64mx/must-gather-bsnf7" Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.181675 4793 scope.go:117] "RemoveContainer" containerID="59d4a95a174bc168546cae3d2f99065a85ea8530df7b05d7ad6a5561f4dc4841" Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.203156 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cplnr\" (UniqueName: \"kubernetes.io/projected/b8f70002-f5f1-4e33-99ec-385dcde56935-kube-api-access-cplnr\") on node \"crc\" DevicePath \"\"" Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.277597 4793 scope.go:117] "RemoveContainer" containerID="fe7b4fda3fa02a8b22b2e634afb961df5ef167ab15e3cf63352c3712affc1c2e" Jan 27 22:49:47 crc kubenswrapper[4793]: E0127 22:49:47.278954 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe7b4fda3fa02a8b22b2e634afb961df5ef167ab15e3cf63352c3712affc1c2e\": container with ID starting with fe7b4fda3fa02a8b22b2e634afb961df5ef167ab15e3cf63352c3712affc1c2e not found: ID does not exist" containerID="fe7b4fda3fa02a8b22b2e634afb961df5ef167ab15e3cf63352c3712affc1c2e" Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.279002 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe7b4fda3fa02a8b22b2e634afb961df5ef167ab15e3cf63352c3712affc1c2e"} err="failed to get container status \"fe7b4fda3fa02a8b22b2e634afb961df5ef167ab15e3cf63352c3712affc1c2e\": rpc error: code = NotFound desc = could not find container \"fe7b4fda3fa02a8b22b2e634afb961df5ef167ab15e3cf63352c3712affc1c2e\": container with ID starting with fe7b4fda3fa02a8b22b2e634afb961df5ef167ab15e3cf63352c3712affc1c2e not found: ID does not exist" Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.279028 4793 scope.go:117] "RemoveContainer" containerID="59d4a95a174bc168546cae3d2f99065a85ea8530df7b05d7ad6a5561f4dc4841" Jan 27 22:49:47 crc kubenswrapper[4793]: E0127 22:49:47.279426 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59d4a95a174bc168546cae3d2f99065a85ea8530df7b05d7ad6a5561f4dc4841\": container with ID starting with 59d4a95a174bc168546cae3d2f99065a85ea8530df7b05d7ad6a5561f4dc4841 not found: ID does not exist" containerID="59d4a95a174bc168546cae3d2f99065a85ea8530df7b05d7ad6a5561f4dc4841" Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.279463 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59d4a95a174bc168546cae3d2f99065a85ea8530df7b05d7ad6a5561f4dc4841"} err="failed to get container status \"59d4a95a174bc168546cae3d2f99065a85ea8530df7b05d7ad6a5561f4dc4841\": rpc error: code = NotFound desc = could not find container \"59d4a95a174bc168546cae3d2f99065a85ea8530df7b05d7ad6a5561f4dc4841\": container with ID starting with 59d4a95a174bc168546cae3d2f99065a85ea8530df7b05d7ad6a5561f4dc4841 not found: ID does not exist" Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.348903 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8f70002-f5f1-4e33-99ec-385dcde56935-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "b8f70002-f5f1-4e33-99ec-385dcde56935" (UID: "b8f70002-f5f1-4e33-99ec-385dcde56935"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.407314 4793 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b8f70002-f5f1-4e33-99ec-385dcde56935-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 27 22:49:47 crc kubenswrapper[4793]: I0127 22:49:47.816512 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8f70002-f5f1-4e33-99ec-385dcde56935" path="/var/lib/kubelet/pods/b8f70002-f5f1-4e33-99ec-385dcde56935/volumes" Jan 27 22:49:50 crc kubenswrapper[4793]: I0127 22:49:50.804102 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:49:50 crc kubenswrapper[4793]: E0127 22:49:50.805147 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:50:02 crc kubenswrapper[4793]: I0127 22:50:02.803370 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:50:02 crc kubenswrapper[4793]: E0127 22:50:02.804180 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:50:16 crc kubenswrapper[4793]: I0127 22:50:16.804126 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:50:16 crc kubenswrapper[4793]: E0127 22:50:16.805652 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.680687 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xl5qr"] Jan 27 22:50:20 crc kubenswrapper[4793]: E0127 22:50:20.682424 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8f70002-f5f1-4e33-99ec-385dcde56935" containerName="gather" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.682458 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8f70002-f5f1-4e33-99ec-385dcde56935" containerName="gather" Jan 27 22:50:20 crc kubenswrapper[4793]: E0127 22:50:20.682498 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb45d505-26cd-4451-9f4c-737b44bf987a" containerName="extract-utilities" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.682520 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb45d505-26cd-4451-9f4c-737b44bf987a" containerName="extract-utilities" Jan 27 22:50:20 crc kubenswrapper[4793]: E0127 22:50:20.682590 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8f70002-f5f1-4e33-99ec-385dcde56935" containerName="copy" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 
22:50:20.682608 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8f70002-f5f1-4e33-99ec-385dcde56935" containerName="copy" Jan 27 22:50:20 crc kubenswrapper[4793]: E0127 22:50:20.682644 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb45d505-26cd-4451-9f4c-737b44bf987a" containerName="extract-content" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.682664 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb45d505-26cd-4451-9f4c-737b44bf987a" containerName="extract-content" Jan 27 22:50:20 crc kubenswrapper[4793]: E0127 22:50:20.682733 4793 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb45d505-26cd-4451-9f4c-737b44bf987a" containerName="registry-server" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.682752 4793 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb45d505-26cd-4451-9f4c-737b44bf987a" containerName="registry-server" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.683241 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8f70002-f5f1-4e33-99ec-385dcde56935" containerName="gather" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.683294 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8f70002-f5f1-4e33-99ec-385dcde56935" containerName="copy" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.683334 4793 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb45d505-26cd-4451-9f4c-737b44bf987a" containerName="registry-server" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.687168 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.701204 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xl5qr"] Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.865576 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djcw9\" (UniqueName: \"kubernetes.io/projected/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-kube-api-access-djcw9\") pod \"certified-operators-xl5qr\" (UID: \"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3\") " pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.865898 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-utilities\") pod \"certified-operators-xl5qr\" (UID: \"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3\") " pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.866116 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-catalog-content\") pod \"certified-operators-xl5qr\" (UID: \"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3\") " pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.968188 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-catalog-content\") pod \"certified-operators-xl5qr\" (UID: \"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3\") " pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:20 crc 
kubenswrapper[4793]: I0127 22:50:20.968629 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djcw9\" (UniqueName: \"kubernetes.io/projected/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-kube-api-access-djcw9\") pod \"certified-operators-xl5qr\" (UID: \"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3\") " pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.968783 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-utilities\") pod \"certified-operators-xl5qr\" (UID: \"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3\") " pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.969126 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-catalog-content\") pod \"certified-operators-xl5qr\" (UID: \"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3\") " pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:20 crc kubenswrapper[4793]: I0127 22:50:20.969750 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-utilities\") pod \"certified-operators-xl5qr\" (UID: \"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3\") " pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:21 crc kubenswrapper[4793]: I0127 22:50:21.010081 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djcw9\" (UniqueName: \"kubernetes.io/projected/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-kube-api-access-djcw9\") pod \"certified-operators-xl5qr\" (UID: \"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3\") " pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:21 crc kubenswrapper[4793]: I0127 22:50:21.039944 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:21 crc kubenswrapper[4793]: I0127 22:50:21.580847 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xl5qr"] Jan 27 22:50:22 crc kubenswrapper[4793]: I0127 22:50:22.588345 4793 generic.go:334] "Generic (PLEG): container finished" podID="ea6041a4-7e65-4dc0-aeb7-966534d0d4a3" containerID="9ef9e004012a80cb1d6ad53f7eeaab9902ee2ae1e1beab59649503defe1fbf95" exitCode=0 Jan 27 22:50:22 crc kubenswrapper[4793]: I0127 22:50:22.591156 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xl5qr" event={"ID":"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3","Type":"ContainerDied","Data":"9ef9e004012a80cb1d6ad53f7eeaab9902ee2ae1e1beab59649503defe1fbf95"} Jan 27 22:50:22 crc kubenswrapper[4793]: I0127 22:50:22.591254 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xl5qr" event={"ID":"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3","Type":"ContainerStarted","Data":"9ec9eb59c3d8e389ed29424b3f3d8388a04a54634c8079b763c8482d471321ca"} Jan 27 22:50:22 crc kubenswrapper[4793]: I0127 22:50:22.753819 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:50:22 crc kubenswrapper[4793]: I0127 22:50:22.753921 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:50:23 crc kubenswrapper[4793]: I0127 22:50:23.603537 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xl5qr" event={"ID":"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3","Type":"ContainerStarted","Data":"0ab3ef7b84e17f5435b73d16f00bc1d85a3b131e30db1ef580a1e1ed90f1aa4d"} Jan 27 22:50:23 crc kubenswrapper[4793]: I0127 22:50:23.855950 4793 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4ghgh"] Jan 27 22:50:23 crc kubenswrapper[4793]: I0127 22:50:23.861692 4793 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4ghgh" Jan 27 22:50:23 crc kubenswrapper[4793]: I0127 22:50:23.871857 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4ghgh"] Jan 27 22:50:24 crc kubenswrapper[4793]: I0127 22:50:24.560921 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vndk9\" (UniqueName: \"kubernetes.io/projected/8a672e6b-643e-4405-9263-5942912dca8a-kube-api-access-vndk9\") pod \"redhat-operators-4ghgh\" (UID: \"8a672e6b-643e-4405-9263-5942912dca8a\") " pod="openshift-marketplace/redhat-operators-4ghgh" Jan 27 22:50:24 crc kubenswrapper[4793]: I0127 22:50:24.561020 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a672e6b-643e-4405-9263-5942912dca8a-catalog-content\") pod \"redhat-operators-4ghgh\" (UID: \"8a672e6b-643e-4405-9263-5942912dca8a\") " pod="openshift-marketplace/redhat-operators-4ghgh" Jan 27 22:50:24 crc kubenswrapper[4793]: I0127 22:50:24.561202 4793 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a672e6b-643e-4405-9263-5942912dca8a-utilities\") pod \"redhat-operators-4ghgh\" (UID: \"8a672e6b-643e-4405-9263-5942912dca8a\") " pod="openshift-marketplace/redhat-operators-4ghgh" Jan 27 22:50:24 crc kubenswrapper[4793]: I0127 22:50:24.663532 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a672e6b-643e-4405-9263-5942912dca8a-utilities\") pod \"redhat-operators-4ghgh\" (UID: \"8a672e6b-643e-4405-9263-5942912dca8a\") " pod="openshift-marketplace/redhat-operators-4ghgh" Jan 27 22:50:24 crc kubenswrapper[4793]: I0127 22:50:24.663638 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vndk9\" (UniqueName: \"kubernetes.io/projected/8a672e6b-643e-4405-9263-5942912dca8a-kube-api-access-vndk9\") pod \"redhat-operators-4ghgh\" (UID: \"8a672e6b-643e-4405-9263-5942912dca8a\") " pod="openshift-marketplace/redhat-operators-4ghgh" Jan 27 22:50:24 crc kubenswrapper[4793]: I0127 22:50:24.663684 4793 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a672e6b-643e-4405-9263-5942912dca8a-catalog-content\") pod \"redhat-operators-4ghgh\" (UID: \"8a672e6b-643e-4405-9263-5942912dca8a\") " pod="openshift-marketplace/redhat-operators-4ghgh" Jan 27 22:50:24 crc kubenswrapper[4793]: I0127 22:50:24.664223 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a672e6b-643e-4405-9263-5942912dca8a-catalog-content\") pod \"redhat-operators-4ghgh\" (UID: \"8a672e6b-643e-4405-9263-5942912dca8a\") " pod="openshift-marketplace/redhat-operators-4ghgh" Jan 27 22:50:24 crc kubenswrapper[4793]: I0127 22:50:24.664484 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a672e6b-643e-4405-9263-5942912dca8a-utilities\") pod \"redhat-operators-4ghgh\" (UID: \"8a672e6b-643e-4405-9263-5942912dca8a\") " pod="openshift-marketplace/redhat-operators-4ghgh" Jan 27 22:50:24 crc kubenswrapper[4793]: I0127 22:50:24.684237 4793 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-vndk9\" (UniqueName: \"kubernetes.io/projected/8a672e6b-643e-4405-9263-5942912dca8a-kube-api-access-vndk9\") pod \"redhat-operators-4ghgh\" (UID: \"8a672e6b-643e-4405-9263-5942912dca8a\") " pod="openshift-marketplace/redhat-operators-4ghgh" Jan 27 22:50:24 crc kubenswrapper[4793]: I0127 22:50:24.787080 4793 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4ghgh" Jan 27 22:50:25 crc kubenswrapper[4793]: I0127 22:50:25.308241 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4ghgh"] Jan 27 22:50:25 crc kubenswrapper[4793]: I0127 22:50:25.625674 4793 generic.go:334] "Generic (PLEG): container finished" podID="ea6041a4-7e65-4dc0-aeb7-966534d0d4a3" containerID="0ab3ef7b84e17f5435b73d16f00bc1d85a3b131e30db1ef580a1e1ed90f1aa4d" exitCode=0 Jan 27 22:50:25 crc kubenswrapper[4793]: I0127 22:50:25.625757 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xl5qr" event={"ID":"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3","Type":"ContainerDied","Data":"0ab3ef7b84e17f5435b73d16f00bc1d85a3b131e30db1ef580a1e1ed90f1aa4d"} Jan 27 22:50:25 crc kubenswrapper[4793]: I0127 22:50:25.628813 4793 generic.go:334] "Generic (PLEG): container finished" podID="8a672e6b-643e-4405-9263-5942912dca8a" containerID="e3b437ef10ee2cbc0e15e29f9345a8fdfe3799b2eb4c1a78d777ff20ed77e4bc" exitCode=0 Jan 27 22:50:25 crc kubenswrapper[4793]: I0127 22:50:25.628919 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4ghgh" event={"ID":"8a672e6b-643e-4405-9263-5942912dca8a","Type":"ContainerDied","Data":"e3b437ef10ee2cbc0e15e29f9345a8fdfe3799b2eb4c1a78d777ff20ed77e4bc"} Jan 27 22:50:25 crc kubenswrapper[4793]: I0127 22:50:25.629007 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4ghgh" event={"ID":"8a672e6b-643e-4405-9263-5942912dca8a","Type":"ContainerStarted","Data":"3323a5ff2b2e28ec68ad004764c790d1e227bb91be1732e29c456852848887a1"} Jan 27 22:50:26 crc kubenswrapper[4793]: I0127 22:50:26.643786 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xl5qr" event={"ID":"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3","Type":"ContainerStarted","Data":"119d85eca45f56ed02007d3036367827dd9e6d22a66db4a143dd9d0c799f600a"} Jan 27 22:50:26 crc kubenswrapper[4793]: I0127 22:50:26.677921 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xl5qr" podStartSLOduration=3.247925884 podStartE2EDuration="6.677898185s" podCreationTimestamp="2026-01-27 22:50:20 +0000 UTC" firstStartedPulling="2026-01-27 22:50:22.592516744 +0000 UTC m=+10047.982769920" lastFinishedPulling="2026-01-27 22:50:26.022489065 +0000 UTC m=+10051.412742221" observedRunningTime="2026-01-27 22:50:26.6747796 +0000 UTC m=+10052.065032786" watchObservedRunningTime="2026-01-27 22:50:26.677898185 +0000 UTC m=+10052.068151351" Jan 27 22:50:28 crc kubenswrapper[4793]: I0127 22:50:28.804672 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:50:28 crc kubenswrapper[4793]: E0127 22:50:28.805452 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier 
pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:50:31 crc kubenswrapper[4793]: I0127 22:50:31.040938 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:31 crc kubenswrapper[4793]: I0127 22:50:31.041624 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:31 crc kubenswrapper[4793]: I0127 22:50:31.090813 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:31 crc kubenswrapper[4793]: I0127 22:50:31.779976 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:34 crc kubenswrapper[4793]: I0127 22:50:34.848132 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xl5qr"] Jan 27 22:50:34 crc kubenswrapper[4793]: I0127 22:50:34.848911 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xl5qr" podUID="ea6041a4-7e65-4dc0-aeb7-966534d0d4a3" containerName="registry-server" containerID="cri-o://119d85eca45f56ed02007d3036367827dd9e6d22a66db4a143dd9d0c799f600a" gracePeriod=2 Jan 27 22:50:35 crc kubenswrapper[4793]: I0127 22:50:35.700999 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:35 crc kubenswrapper[4793]: I0127 22:50:35.756582 4793 generic.go:334] "Generic (PLEG): container finished" podID="ea6041a4-7e65-4dc0-aeb7-966534d0d4a3" containerID="119d85eca45f56ed02007d3036367827dd9e6d22a66db4a143dd9d0c799f600a" exitCode=0 Jan 27 22:50:35 crc kubenswrapper[4793]: I0127 22:50:35.756626 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xl5qr" event={"ID":"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3","Type":"ContainerDied","Data":"119d85eca45f56ed02007d3036367827dd9e6d22a66db4a143dd9d0c799f600a"} Jan 27 22:50:35 crc kubenswrapper[4793]: I0127 22:50:35.756671 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xl5qr" event={"ID":"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3","Type":"ContainerDied","Data":"9ec9eb59c3d8e389ed29424b3f3d8388a04a54634c8079b763c8482d471321ca"} Jan 27 22:50:35 crc kubenswrapper[4793]: I0127 22:50:35.756689 4793 scope.go:117] "RemoveContainer" containerID="119d85eca45f56ed02007d3036367827dd9e6d22a66db4a143dd9d0c799f600a" Jan 27 22:50:35 crc kubenswrapper[4793]: I0127 22:50:35.756691 4793 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xl5qr" Jan 27 22:50:35 crc kubenswrapper[4793]: I0127 22:50:35.759652 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4ghgh" event={"ID":"8a672e6b-643e-4405-9263-5942912dca8a","Type":"ContainerStarted","Data":"733d19b2608ccea40cfba641e7383ebb5f9bd3938ab7e465b5a01a52cbed6c67"} Jan 27 22:50:35 crc kubenswrapper[4793]: I0127 22:50:35.802943 4793 scope.go:117] "RemoveContainer" containerID="0ab3ef7b84e17f5435b73d16f00bc1d85a3b131e30db1ef580a1e1ed90f1aa4d" Jan 27 22:50:35 crc kubenswrapper[4793]: I0127 22:50:35.855973 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-catalog-content\") pod \"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3\" (UID: \"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3\") " Jan 27 22:50:35 crc kubenswrapper[4793]: I0127 22:50:35.856952 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-utilities\") pod \"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3\" (UID: \"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3\") " Jan 27 22:50:35 crc kubenswrapper[4793]: I0127 22:50:35.857000 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djcw9\" (UniqueName: \"kubernetes.io/projected/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-kube-api-access-djcw9\") pod \"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3\" (UID: \"ea6041a4-7e65-4dc0-aeb7-966534d0d4a3\") " Jan 27 22:50:35 crc kubenswrapper[4793]: I0127 22:50:35.858336 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-utilities" (OuterVolumeSpecName: "utilities") pod "ea6041a4-7e65-4dc0-aeb7-966534d0d4a3" (UID: "ea6041a4-7e65-4dc0-aeb7-966534d0d4a3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:50:35 crc kubenswrapper[4793]: I0127 22:50:35.922030 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ea6041a4-7e65-4dc0-aeb7-966534d0d4a3" (UID: "ea6041a4-7e65-4dc0-aeb7-966534d0d4a3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:50:35 crc kubenswrapper[4793]: I0127 22:50:35.960629 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 22:50:35 crc kubenswrapper[4793]: I0127 22:50:35.960676 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 22:50:36 crc kubenswrapper[4793]: I0127 22:50:36.447906 4793 scope.go:117] "RemoveContainer" containerID="9ef9e004012a80cb1d6ad53f7eeaab9902ee2ae1e1beab59649503defe1fbf95" Jan 27 22:50:36 crc kubenswrapper[4793]: I0127 22:50:36.448071 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-kube-api-access-djcw9" (OuterVolumeSpecName: "kube-api-access-djcw9") pod "ea6041a4-7e65-4dc0-aeb7-966534d0d4a3" (UID: "ea6041a4-7e65-4dc0-aeb7-966534d0d4a3"). InnerVolumeSpecName "kube-api-access-djcw9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 22:50:36 crc kubenswrapper[4793]: I0127 22:50:36.472217 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djcw9\" (UniqueName: \"kubernetes.io/projected/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3-kube-api-access-djcw9\") on node \"crc\" DevicePath \"\"" Jan 27 22:50:36 crc kubenswrapper[4793]: I0127 22:50:36.698019 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xl5qr"] Jan 27 22:50:36 crc kubenswrapper[4793]: I0127 22:50:36.711677 4793 scope.go:117] "RemoveContainer" containerID="119d85eca45f56ed02007d3036367827dd9e6d22a66db4a143dd9d0c799f600a" Jan 27 22:50:36 crc kubenswrapper[4793]: I0127 22:50:36.712474 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xl5qr"] Jan 27 22:50:36 crc kubenswrapper[4793]: E0127 22:50:36.715165 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"119d85eca45f56ed02007d3036367827dd9e6d22a66db4a143dd9d0c799f600a\": container with ID starting with 119d85eca45f56ed02007d3036367827dd9e6d22a66db4a143dd9d0c799f600a not found: ID does not exist" containerID="119d85eca45f56ed02007d3036367827dd9e6d22a66db4a143dd9d0c799f600a" Jan 27 22:50:36 crc kubenswrapper[4793]: I0127 22:50:36.715215 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"119d85eca45f56ed02007d3036367827dd9e6d22a66db4a143dd9d0c799f600a"} err="failed to get container status \"119d85eca45f56ed02007d3036367827dd9e6d22a66db4a143dd9d0c799f600a\": rpc error: code = NotFound desc = could not find container \"119d85eca45f56ed02007d3036367827dd9e6d22a66db4a143dd9d0c799f600a\": container with ID starting with 119d85eca45f56ed02007d3036367827dd9e6d22a66db4a143dd9d0c799f600a not found: ID does not exist" Jan 27 22:50:36 crc kubenswrapper[4793]: I0127 22:50:36.715249 4793 scope.go:117] "RemoveContainer" containerID="0ab3ef7b84e17f5435b73d16f00bc1d85a3b131e30db1ef580a1e1ed90f1aa4d" Jan 27 22:50:36 crc kubenswrapper[4793]: E0127 22:50:36.715610 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ab3ef7b84e17f5435b73d16f00bc1d85a3b131e30db1ef580a1e1ed90f1aa4d\": container with ID 
starting with 0ab3ef7b84e17f5435b73d16f00bc1d85a3b131e30db1ef580a1e1ed90f1aa4d not found: ID does not exist" containerID="0ab3ef7b84e17f5435b73d16f00bc1d85a3b131e30db1ef580a1e1ed90f1aa4d" Jan 27 22:50:36 crc kubenswrapper[4793]: I0127 22:50:36.715638 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ab3ef7b84e17f5435b73d16f00bc1d85a3b131e30db1ef580a1e1ed90f1aa4d"} err="failed to get container status \"0ab3ef7b84e17f5435b73d16f00bc1d85a3b131e30db1ef580a1e1ed90f1aa4d\": rpc error: code = NotFound desc = could not find container \"0ab3ef7b84e17f5435b73d16f00bc1d85a3b131e30db1ef580a1e1ed90f1aa4d\": container with ID starting with 0ab3ef7b84e17f5435b73d16f00bc1d85a3b131e30db1ef580a1e1ed90f1aa4d not found: ID does not exist" Jan 27 22:50:36 crc kubenswrapper[4793]: I0127 22:50:36.715656 4793 scope.go:117] "RemoveContainer" containerID="9ef9e004012a80cb1d6ad53f7eeaab9902ee2ae1e1beab59649503defe1fbf95" Jan 27 22:50:36 crc kubenswrapper[4793]: E0127 22:50:36.716129 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ef9e004012a80cb1d6ad53f7eeaab9902ee2ae1e1beab59649503defe1fbf95\": container with ID starting with 9ef9e004012a80cb1d6ad53f7eeaab9902ee2ae1e1beab59649503defe1fbf95 not found: ID does not exist" containerID="9ef9e004012a80cb1d6ad53f7eeaab9902ee2ae1e1beab59649503defe1fbf95" Jan 27 22:50:36 crc kubenswrapper[4793]: I0127 22:50:36.716158 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ef9e004012a80cb1d6ad53f7eeaab9902ee2ae1e1beab59649503defe1fbf95"} err="failed to get container status \"9ef9e004012a80cb1d6ad53f7eeaab9902ee2ae1e1beab59649503defe1fbf95\": rpc error: code = NotFound desc = could not find container \"9ef9e004012a80cb1d6ad53f7eeaab9902ee2ae1e1beab59649503defe1fbf95\": container with ID starting with 9ef9e004012a80cb1d6ad53f7eeaab9902ee2ae1e1beab59649503defe1fbf95 not found: ID does not exist" Jan 27 22:50:37 crc kubenswrapper[4793]: I0127 22:50:37.824130 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea6041a4-7e65-4dc0-aeb7-966534d0d4a3" path="/var/lib/kubelet/pods/ea6041a4-7e65-4dc0-aeb7-966534d0d4a3/volumes" Jan 27 22:50:38 crc kubenswrapper[4793]: I0127 22:50:38.815257 4793 generic.go:334] "Generic (PLEG): container finished" podID="8a672e6b-643e-4405-9263-5942912dca8a" containerID="733d19b2608ccea40cfba641e7383ebb5f9bd3938ab7e465b5a01a52cbed6c67" exitCode=0 Jan 27 22:50:38 crc kubenswrapper[4793]: I0127 22:50:38.815364 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4ghgh" event={"ID":"8a672e6b-643e-4405-9263-5942912dca8a","Type":"ContainerDied","Data":"733d19b2608ccea40cfba641e7383ebb5f9bd3938ab7e465b5a01a52cbed6c67"} Jan 27 22:50:39 crc kubenswrapper[4793]: I0127 22:50:39.804276 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:50:39 crc kubenswrapper[4793]: E0127 22:50:39.805043 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:50:39 crc kubenswrapper[4793]: I0127 22:50:39.825408 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-4ghgh" event={"ID":"8a672e6b-643e-4405-9263-5942912dca8a","Type":"ContainerStarted","Data":"6a0b12ce63e28288454efed8bf82f69a8d6017d296dec9bc662c86083579bb80"} Jan 27 22:50:39 crc kubenswrapper[4793]: I0127 22:50:39.850788 4793 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4ghgh" podStartSLOduration=3.246290471 podStartE2EDuration="16.850761817s" podCreationTimestamp="2026-01-27 22:50:23 +0000 UTC" firstStartedPulling="2026-01-27 22:50:25.633477055 +0000 UTC m=+10051.023730211" lastFinishedPulling="2026-01-27 22:50:39.237948391 +0000 UTC m=+10064.628201557" observedRunningTime="2026-01-27 22:50:39.842062998 +0000 UTC m=+10065.232316164" watchObservedRunningTime="2026-01-27 22:50:39.850761817 +0000 UTC m=+10065.241015003" Jan 27 22:50:44 crc kubenswrapper[4793]: I0127 22:50:44.787916 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4ghgh" Jan 27 22:50:44 crc kubenswrapper[4793]: I0127 22:50:44.788705 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4ghgh" Jan 27 22:50:45 crc kubenswrapper[4793]: I0127 22:50:45.852301 4793 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4ghgh" podUID="8a672e6b-643e-4405-9263-5942912dca8a" containerName="registry-server" probeResult="failure" output=< Jan 27 22:50:45 crc kubenswrapper[4793]: timeout: failed to connect service ":50051" within 1s Jan 27 22:50:45 crc kubenswrapper[4793]: > Jan 27 22:50:52 crc kubenswrapper[4793]: I0127 22:50:52.753623 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:50:52 crc kubenswrapper[4793]: I0127 22:50:52.754262 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:50:52 crc kubenswrapper[4793]: I0127 22:50:52.804674 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:50:52 crc kubenswrapper[4793]: E0127 22:50:52.805134 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:50:54 crc kubenswrapper[4793]: I0127 22:50:54.875214 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4ghgh" Jan 27 22:50:54 crc kubenswrapper[4793]: I0127 22:50:54.938195 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4ghgh" Jan 27 22:50:55 crc kubenswrapper[4793]: I0127 22:50:55.033021 4793 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4ghgh"] Jan 27 22:50:55 crc kubenswrapper[4793]: 
Jan 27 22:50:55 crc kubenswrapper[4793]: I0127 22:50:55.131519 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6vqft" podUID="48ec0076-1321-431f-8d4a-06ab47d87847" containerName="registry-server" containerID="cri-o://a5e51ae8d3ffef715937068c18bdb3e7c1c8c59525c959c29aa385f264f85622" gracePeriod=2
Jan 27 22:50:55 crc kubenswrapper[4793]: I0127 22:50:55.635525 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6vqft"
Jan 27 22:50:55 crc kubenswrapper[4793]: I0127 22:50:55.714884 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48ec0076-1321-431f-8d4a-06ab47d87847-catalog-content\") pod \"48ec0076-1321-431f-8d4a-06ab47d87847\" (UID: \"48ec0076-1321-431f-8d4a-06ab47d87847\") "
Jan 27 22:50:55 crc kubenswrapper[4793]: I0127 22:50:55.715059 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qmtp\" (UniqueName: \"kubernetes.io/projected/48ec0076-1321-431f-8d4a-06ab47d87847-kube-api-access-2qmtp\") pod \"48ec0076-1321-431f-8d4a-06ab47d87847\" (UID: \"48ec0076-1321-431f-8d4a-06ab47d87847\") "
Jan 27 22:50:55 crc kubenswrapper[4793]: I0127 22:50:55.715214 4793 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48ec0076-1321-431f-8d4a-06ab47d87847-utilities\") pod \"48ec0076-1321-431f-8d4a-06ab47d87847\" (UID: \"48ec0076-1321-431f-8d4a-06ab47d87847\") "
Jan 27 22:50:55 crc kubenswrapper[4793]: I0127 22:50:55.715587 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/48ec0076-1321-431f-8d4a-06ab47d87847-utilities" (OuterVolumeSpecName: "utilities") pod "48ec0076-1321-431f-8d4a-06ab47d87847" (UID: "48ec0076-1321-431f-8d4a-06ab47d87847"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 22:50:55 crc kubenswrapper[4793]: I0127 22:50:55.716020 4793 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48ec0076-1321-431f-8d4a-06ab47d87847-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 22:50:55 crc kubenswrapper[4793]: I0127 22:50:55.730996 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48ec0076-1321-431f-8d4a-06ab47d87847-kube-api-access-2qmtp" (OuterVolumeSpecName: "kube-api-access-2qmtp") pod "48ec0076-1321-431f-8d4a-06ab47d87847" (UID: "48ec0076-1321-431f-8d4a-06ab47d87847"). InnerVolumeSpecName "kube-api-access-2qmtp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 22:50:55 crc kubenswrapper[4793]: I0127 22:50:55.817741 4793 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qmtp\" (UniqueName: \"kubernetes.io/projected/48ec0076-1321-431f-8d4a-06ab47d87847-kube-api-access-2qmtp\") on node \"crc\" DevicePath \"\""
Jan 27 22:50:55 crc kubenswrapper[4793]: I0127 22:50:55.831794 4793 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/48ec0076-1321-431f-8d4a-06ab47d87847-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "48ec0076-1321-431f-8d4a-06ab47d87847" (UID: "48ec0076-1321-431f-8d4a-06ab47d87847"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 22:50:55 crc kubenswrapper[4793]: I0127 22:50:55.920506 4793 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48ec0076-1321-431f-8d4a-06ab47d87847-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 22:50:56 crc kubenswrapper[4793]: I0127 22:50:56.038957 4793 generic.go:334] "Generic (PLEG): container finished" podID="48ec0076-1321-431f-8d4a-06ab47d87847" containerID="a5e51ae8d3ffef715937068c18bdb3e7c1c8c59525c959c29aa385f264f85622" exitCode=0 Jan 27 22:50:56 crc kubenswrapper[4793]: I0127 22:50:56.039072 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6vqft" event={"ID":"48ec0076-1321-431f-8d4a-06ab47d87847","Type":"ContainerDied","Data":"a5e51ae8d3ffef715937068c18bdb3e7c1c8c59525c959c29aa385f264f85622"} Jan 27 22:50:56 crc kubenswrapper[4793]: I0127 22:50:56.039158 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6vqft" event={"ID":"48ec0076-1321-431f-8d4a-06ab47d87847","Type":"ContainerDied","Data":"012467b77d8f9f97b2300c6147a4a469871f838fac23be1dac915b08055711d6"} Jan 27 22:50:56 crc kubenswrapper[4793]: I0127 22:50:56.039184 4793 scope.go:117] "RemoveContainer" containerID="a5e51ae8d3ffef715937068c18bdb3e7c1c8c59525c959c29aa385f264f85622" Jan 27 22:50:56 crc kubenswrapper[4793]: I0127 22:50:56.039334 4793 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6vqft" Jan 27 22:50:56 crc kubenswrapper[4793]: I0127 22:50:56.064958 4793 scope.go:117] "RemoveContainer" containerID="f7f37952d7c11665dce7dd9ce61b99a84653e675be8c9bafbd0a958e8d192a95" Jan 27 22:50:56 crc kubenswrapper[4793]: I0127 22:50:56.077200 4793 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6vqft"] Jan 27 22:50:56 crc kubenswrapper[4793]: I0127 22:50:56.089521 4793 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6vqft"] Jan 27 22:50:56 crc kubenswrapper[4793]: I0127 22:50:56.100601 4793 scope.go:117] "RemoveContainer" containerID="266a149b9c875e49439012438e21db8dcd06d4c608a6a5b3d1a6ac7c4360e508" Jan 27 22:50:56 crc kubenswrapper[4793]: I0127 22:50:56.148686 4793 scope.go:117] "RemoveContainer" containerID="a5e51ae8d3ffef715937068c18bdb3e7c1c8c59525c959c29aa385f264f85622" Jan 27 22:50:56 crc kubenswrapper[4793]: E0127 22:50:56.150339 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5e51ae8d3ffef715937068c18bdb3e7c1c8c59525c959c29aa385f264f85622\": container with ID starting with a5e51ae8d3ffef715937068c18bdb3e7c1c8c59525c959c29aa385f264f85622 not found: ID does not exist" containerID="a5e51ae8d3ffef715937068c18bdb3e7c1c8c59525c959c29aa385f264f85622" Jan 27 22:50:56 crc kubenswrapper[4793]: I0127 22:50:56.150389 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5e51ae8d3ffef715937068c18bdb3e7c1c8c59525c959c29aa385f264f85622"} err="failed to get container status \"a5e51ae8d3ffef715937068c18bdb3e7c1c8c59525c959c29aa385f264f85622\": rpc error: code = NotFound desc = could not find container \"a5e51ae8d3ffef715937068c18bdb3e7c1c8c59525c959c29aa385f264f85622\": container with ID starting with a5e51ae8d3ffef715937068c18bdb3e7c1c8c59525c959c29aa385f264f85622 not found: ID 
does not exist" Jan 27 22:50:56 crc kubenswrapper[4793]: I0127 22:50:56.150417 4793 scope.go:117] "RemoveContainer" containerID="f7f37952d7c11665dce7dd9ce61b99a84653e675be8c9bafbd0a958e8d192a95" Jan 27 22:50:56 crc kubenswrapper[4793]: E0127 22:50:56.150777 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7f37952d7c11665dce7dd9ce61b99a84653e675be8c9bafbd0a958e8d192a95\": container with ID starting with f7f37952d7c11665dce7dd9ce61b99a84653e675be8c9bafbd0a958e8d192a95 not found: ID does not exist" containerID="f7f37952d7c11665dce7dd9ce61b99a84653e675be8c9bafbd0a958e8d192a95" Jan 27 22:50:56 crc kubenswrapper[4793]: I0127 22:50:56.150804 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7f37952d7c11665dce7dd9ce61b99a84653e675be8c9bafbd0a958e8d192a95"} err="failed to get container status \"f7f37952d7c11665dce7dd9ce61b99a84653e675be8c9bafbd0a958e8d192a95\": rpc error: code = NotFound desc = could not find container \"f7f37952d7c11665dce7dd9ce61b99a84653e675be8c9bafbd0a958e8d192a95\": container with ID starting with f7f37952d7c11665dce7dd9ce61b99a84653e675be8c9bafbd0a958e8d192a95 not found: ID does not exist" Jan 27 22:50:56 crc kubenswrapper[4793]: I0127 22:50:56.150820 4793 scope.go:117] "RemoveContainer" containerID="266a149b9c875e49439012438e21db8dcd06d4c608a6a5b3d1a6ac7c4360e508" Jan 27 22:50:56 crc kubenswrapper[4793]: E0127 22:50:56.151163 4793 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"266a149b9c875e49439012438e21db8dcd06d4c608a6a5b3d1a6ac7c4360e508\": container with ID starting with 266a149b9c875e49439012438e21db8dcd06d4c608a6a5b3d1a6ac7c4360e508 not found: ID does not exist" containerID="266a149b9c875e49439012438e21db8dcd06d4c608a6a5b3d1a6ac7c4360e508" Jan 27 22:50:56 crc kubenswrapper[4793]: I0127 22:50:56.151184 4793 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"266a149b9c875e49439012438e21db8dcd06d4c608a6a5b3d1a6ac7c4360e508"} err="failed to get container status \"266a149b9c875e49439012438e21db8dcd06d4c608a6a5b3d1a6ac7c4360e508\": rpc error: code = NotFound desc = could not find container \"266a149b9c875e49439012438e21db8dcd06d4c608a6a5b3d1a6ac7c4360e508\": container with ID starting with 266a149b9c875e49439012438e21db8dcd06d4c608a6a5b3d1a6ac7c4360e508 not found: ID does not exist" Jan 27 22:50:57 crc kubenswrapper[4793]: I0127 22:50:57.820789 4793 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48ec0076-1321-431f-8d4a-06ab47d87847" path="/var/lib/kubelet/pods/48ec0076-1321-431f-8d4a-06ab47d87847/volumes" Jan 27 22:51:05 crc kubenswrapper[4793]: I0127 22:51:05.838946 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:51:05 crc kubenswrapper[4793]: E0127 22:51:05.847855 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:51:16 crc kubenswrapper[4793]: I0127 22:51:16.804206 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:51:16 crc kubenswrapper[4793]: 
Jan 27 22:51:22 crc kubenswrapper[4793]: I0127 22:51:22.753878 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 22:51:22 crc kubenswrapper[4793]: I0127 22:51:22.754601 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 22:51:22 crc kubenswrapper[4793]: I0127 22:51:22.754668 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn"
Jan 27 22:51:22 crc kubenswrapper[4793]: I0127 22:51:22.755587 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"02811de85cd142064d9049f5bfecb2ab0a0bcf3e4487829503c2ea5729c08640"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 27 22:51:22 crc kubenswrapper[4793]: I0127 22:51:22.755690 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://02811de85cd142064d9049f5bfecb2ab0a0bcf3e4487829503c2ea5729c08640" gracePeriod=600
Jan 27 22:51:29 crc kubenswrapper[4793]: I0127 22:51:29.456746 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="02811de85cd142064d9049f5bfecb2ab0a0bcf3e4487829503c2ea5729c08640" exitCode=0
Jan 27 22:51:29 crc kubenswrapper[4793]: I0127 22:51:29.456791 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"02811de85cd142064d9049f5bfecb2ab0a0bcf3e4487829503c2ea5729c08640"}
Jan 27 22:51:29 crc kubenswrapper[4793]: I0127 22:51:29.457270 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerStarted","Data":"227069325e7ca43826d847024fe46533513141f8d5a28398ce8d39cde5270229"}
Jan 27 22:51:29 crc kubenswrapper[4793]: I0127 22:51:29.457299 4793 scope.go:117] "RemoveContainer" containerID="90b5733ca6815f99c359ce5446ff676d8ec47949766e04a0a646b79d10b93027"
Jan 27 22:51:31 crc kubenswrapper[4793]: I0127 22:51:31.804422 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1"
Jan 27 22:51:31 crc kubenswrapper[4793]: E0127 22:51:31.805354 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:51:44 crc kubenswrapper[4793]: I0127 22:51:44.804132 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:51:45 crc kubenswrapper[4793]: I0127 22:51:45.677912 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerStarted","Data":"34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a"} Jan 27 22:51:48 crc kubenswrapper[4793]: I0127 22:51:48.242733 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:51:48 crc kubenswrapper[4793]: I0127 22:51:48.243641 4793 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Jan 27 22:51:48 crc kubenswrapper[4793]: E0127 22:51:48.501514 4793 log.go:32] "ExecSync cmd from runtime service failed" err=< Jan 27 22:51:48 crc kubenswrapper[4793]: rpc error: code = Unknown desc = command error: setns `mnt`: Bad file descriptor Jan 27 22:51:48 crc kubenswrapper[4793]: fail startup Jan 27 22:51:48 crc kubenswrapper[4793]: , stdout: , stderr: , exit code -1 Jan 27 22:51:48 crc kubenswrapper[4793]: > containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 27 22:51:48 crc kubenswrapper[4793]: E0127 22:51:48.502060 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a is running failed: container process not found" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 27 22:51:48 crc kubenswrapper[4793]: E0127 22:51:48.502444 4793 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a is running failed: container process not found" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Jan 27 22:51:48 crc kubenswrapper[4793]: E0127 22:51:48.502533 4793 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a is running failed: container process not found" probeType="Startup" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerName="watcher-applier" Jan 27 22:51:48 crc kubenswrapper[4793]: I0127 22:51:48.714992 4793 generic.go:334] "Generic (PLEG): container finished" podID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" exitCode=1 Jan 27 22:51:48 crc kubenswrapper[4793]: I0127 22:51:48.715099 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" 
event={"ID":"045591bb-dd8c-437e-9cf8-0e0b520fc49d","Type":"ContainerDied","Data":"34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a"} Jan 27 22:51:48 crc kubenswrapper[4793]: I0127 22:51:48.715415 4793 scope.go:117] "RemoveContainer" containerID="913cf98cf854bb6855d85771e242aa310ceb9a44e1b7201b1d142f3fcab0b3d1" Jan 27 22:51:48 crc kubenswrapper[4793]: I0127 22:51:48.716206 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" Jan 27 22:51:48 crc kubenswrapper[4793]: E0127 22:51:48.717265 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:51:49 crc kubenswrapper[4793]: I0127 22:51:49.731610 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" Jan 27 22:51:49 crc kubenswrapper[4793]: E0127 22:51:49.732534 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:51:58 crc kubenswrapper[4793]: I0127 22:51:58.242961 4793 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:51:58 crc kubenswrapper[4793]: I0127 22:51:58.243656 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-applier-0" Jan 27 22:51:58 crc kubenswrapper[4793]: I0127 22:51:58.244571 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" Jan 27 22:51:58 crc kubenswrapper[4793]: E0127 22:51:58.244896 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:52:11 crc kubenswrapper[4793]: I0127 22:52:11.806245 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" Jan 27 22:52:11 crc kubenswrapper[4793]: E0127 22:52:11.807157 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:52:26 crc kubenswrapper[4793]: I0127 22:52:26.804034 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" Jan 27 22:52:26 crc kubenswrapper[4793]: E0127 22:52:26.804964 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" 
pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:52:41 crc kubenswrapper[4793]: I0127 22:52:41.803981 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" Jan 27 22:52:41 crc kubenswrapper[4793]: E0127 22:52:41.804808 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:52:52 crc kubenswrapper[4793]: I0127 22:52:52.804204 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" Jan 27 22:52:52 crc kubenswrapper[4793]: E0127 22:52:52.805323 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:53:07 crc kubenswrapper[4793]: I0127 22:53:07.804837 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" Jan 27 22:53:07 crc kubenswrapper[4793]: E0127 22:53:07.806089 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:53:18 crc kubenswrapper[4793]: I0127 22:53:18.803968 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" Jan 27 22:53:18 crc kubenswrapper[4793]: E0127 22:53:18.805058 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:53:32 crc kubenswrapper[4793]: I0127 22:53:32.805382 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" Jan 27 22:53:32 crc kubenswrapper[4793]: E0127 22:53:32.808161 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:53:44 crc kubenswrapper[4793]: I0127 22:53:44.804435 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" Jan 27 22:53:44 crc kubenswrapper[4793]: E0127 22:53:44.805463 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" 
pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:53:52 crc kubenswrapper[4793]: I0127 22:53:52.754079 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:53:52 crc kubenswrapper[4793]: I0127 22:53:52.756474 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:53:57 crc kubenswrapper[4793]: I0127 22:53:57.803277 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" Jan 27 22:53:57 crc kubenswrapper[4793]: E0127 22:53:57.804131 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:54:12 crc kubenswrapper[4793]: I0127 22:54:12.805054 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" Jan 27 22:54:12 crc kubenswrapper[4793]: E0127 22:54:12.806253 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:54:22 crc kubenswrapper[4793]: I0127 22:54:22.753690 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 22:54:22 crc kubenswrapper[4793]: I0127 22:54:22.754425 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 22:54:25 crc kubenswrapper[4793]: I0127 22:54:25.813840 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" Jan 27 22:54:25 crc kubenswrapper[4793]: E0127 22:54:25.814746 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d" Jan 27 22:54:38 crc kubenswrapper[4793]: I0127 22:54:38.804961 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a" Jan 27 22:54:38 crc 
Jan 27 22:54:51 crc kubenswrapper[4793]: I0127 22:54:51.806293 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a"
Jan 27 22:54:51 crc kubenswrapper[4793]: E0127 22:54:51.807387 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:54:52 crc kubenswrapper[4793]: I0127 22:54:52.753873 4793 patch_prober.go:28] interesting pod/machine-config-daemon-gq8gn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 22:54:52 crc kubenswrapper[4793]: I0127 22:54:52.754096 4793 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 22:54:52 crc kubenswrapper[4793]: I0127 22:54:52.754199 4793 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn"
Jan 27 22:54:52 crc kubenswrapper[4793]: I0127 22:54:52.756594 4793 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"227069325e7ca43826d847024fe46533513141f8d5a28398ce8d39cde5270229"} pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 27 22:54:52 crc kubenswrapper[4793]: I0127 22:54:52.757193 4793 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerName="machine-config-daemon" containerID="cri-o://227069325e7ca43826d847024fe46533513141f8d5a28398ce8d39cde5270229" gracePeriod=600
Jan 27 22:54:52 crc kubenswrapper[4793]: E0127 22:54:52.888486 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:54:53 crc kubenswrapper[4793]: I0127 22:54:53.576160 4793 generic.go:334] "Generic (PLEG): container finished" podID="bb16a16f-6f5f-4462-be09-372a8b10739a" containerID="227069325e7ca43826d847024fe46533513141f8d5a28398ce8d39cde5270229" exitCode=0
Jan 27 22:54:53 crc kubenswrapper[4793]: I0127 22:54:53.576221 4793 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" event={"ID":"bb16a16f-6f5f-4462-be09-372a8b10739a","Type":"ContainerDied","Data":"227069325e7ca43826d847024fe46533513141f8d5a28398ce8d39cde5270229"}
Jan 27 22:54:53 crc kubenswrapper[4793]: I0127 22:54:53.576270 4793 scope.go:117] "RemoveContainer" containerID="02811de85cd142064d9049f5bfecb2ab0a0bcf3e4487829503c2ea5729c08640"
Jan 27 22:54:53 crc kubenswrapper[4793]: I0127 22:54:53.577084 4793 scope.go:117] "RemoveContainer" containerID="227069325e7ca43826d847024fe46533513141f8d5a28398ce8d39cde5270229"
Jan 27 22:54:53 crc kubenswrapper[4793]: E0127 22:54:53.577418 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"
Jan 27 22:55:05 crc kubenswrapper[4793]: I0127 22:55:05.814288 4793 scope.go:117] "RemoveContainer" containerID="34d6d2863f58ca34f7dd8b6de25652c303e0f6c95bb5f25021cd0e185111cf0a"
Jan 27 22:55:05 crc kubenswrapper[4793]: E0127 22:55:05.815440 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=watcher-applier pod=watcher-applier-0_openstack(045591bb-dd8c-437e-9cf8-0e0b520fc49d)\"" pod="openstack/watcher-applier-0" podUID="045591bb-dd8c-437e-9cf8-0e0b520fc49d"
Jan 27 22:55:07 crc kubenswrapper[4793]: I0127 22:55:07.803533 4793 scope.go:117] "RemoveContainer" containerID="227069325e7ca43826d847024fe46533513141f8d5a28398ce8d39cde5270229"
Jan 27 22:55:07 crc kubenswrapper[4793]: E0127 22:55:07.804216 4793 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-gq8gn_openshift-machine-config-operator(bb16a16f-6f5f-4462-be09-372a8b10739a)\"" pod="openshift-machine-config-operator/machine-config-daemon-gq8gn" podUID="bb16a16f-6f5f-4462-be09-372a8b10739a"